Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4

AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.

* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
  PRNG: Device tree entry for qrng device.
  vidc:1080p: Set video core timeout value for Thumbnail mode
  msm: sps: improve the debugging support in SPS driver
  board-8064 msm: Overlap secure and non secure video firmware heaps.
  msm: clock: Add handoff ops for 7x30 and copper XO clocks
  msm_fb: display: Wait for external vsync before DTV IOMMU unmap
  msm: Fix circular dependency in debug UART settings
  msm: gdsc: Add GDSC regulator driver for msm-copper
  defconfig: Enable Mobicore Driver.
  mobicore: Add mobicore driver.
  mobicore: rename variable to lower case.
  mobicore: rename folder.
  mobicore: add makefiles
  mobicore: initial import of kernel driver
  ASoC: msm: Add SLIMBUS_2_RX CPU DAI
  board-8064-gpio: Update FUNC for EPM SPI CS
  msm_fb: display: Remove chicken bit config during video playback
  mmc: msm_sdcc: enable the sanitize capability
  msm-fb: display: lm2 writeback support on mpq platforms
  msm_fb: display: Disable LVDS phy & pll during panel off
  ...

Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 1a0254a..1c2f018 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -51,6 +51,10 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called ad525x_dpot-spi.
 
+config ANDROID_PMEM
+	bool "Android pmem allocator"
+	default y
+
 config ATMEL_PWM
 	tristate "Atmel AT32/AT91 PWM support"
 	depends on HAVE_CLK
@@ -517,7 +521,146 @@
 	---help---
 	 Creates an rfkill entry in sysfs for power control of Bluetooth
 	 TI wl127x chips.
-	 
+
+config APANIC
+	bool "Android kernel panic diagnostics driver"
+	default n
+	---help---
+	 Driver which handles kernel panics and attempts to write
+	 critical debugging data to flash.
+
+config APANIC_PLABEL
+	string "Android panic dump flash partition label"
+	depends on APANIC
+	default "kpanic"
+	---help---
+	 If your platform uses a different flash partition label for storing
+ 	 crashdumps, enter it here.
+
+config TSIF
+	depends on ARCH_MSM
+	tristate "TSIF (Transport Stream InterFace) support"
+	default n
+	---help---
+	  This driver supports the low-level TSIF interface. It provides an API
+	  for upper-layer drivers. If you have TSIF hardware, say
+	  Y here and read <file:Documentation/arm/msm/tsif.txt>.
+
+	  To compile this driver as module, choose M here: the
+	  module will be called msm_tsif.
+
+config TSIF_CHRDEV
+	tristate "TSIF character device"
+	depends on TSIF
+	default n
+	---help---
+	  This driver uses the low-level TSIF interface. It provides a character
+	  device usable from user-space programs: the TSIF stream can be read
+	  from this device.
+
+	  This driver may be used as an example of TSIF API usage.
+
+	  To compile this driver as module, choose M here: the
+	  module will be called tsif_chrdev.
+
+config TSIF_DEBUG
+	bool "Turn on debugging information for tsif driver"
+	depends on TSIF
+	default n
+	---help---
+	  This turns on debugging information for the tsif driver.
+
+config TSPP
+	depends on ARCH_MSM
+	tristate "TSPP (Transport Stream Packet Processor) Support"
+	---help---
+	  The Transport Stream Packet Processor (TSPP) is used to offload the
+	  processing of MPEG transport streams from the main processor.
+	  This driver can also be compiled as a loadable module.
+
+config HAPTIC_ISA1200
+	tristate "ISA1200 haptic support"
+	depends on I2C
+	default n
+	help
+	  The ISA1200 is a high-performance haptic motor driver IC.
+
+config PMIC8058_PWM
+	tristate "Qualcomm PM8058 PWM support"
+	depends on PMIC8058
+	default y
+	help
+	  This option enables device driver support for the PWM channels
+	  on the Qualcomm PM8058 chip. Pulse Width Modulation is used for
+	  purposes including software-controlled backlight brightness,
+	  motor control, and waveform generation.
+
+config PMIC8XXX_VIBRATOR
+	tristate "Qualcomm Vibrator support for PMIC8XXX"
+	depends on MFD_PM8XXX && ANDROID_TIMED_OUTPUT
+	help
+	  This option enables device driver support for the vibrator
+	  on the PM8XXX chips. The vibrator is controlled using the
+	  timed output class.
+
+config PMIC8XXX_NFC
+	tristate "Qualcomm PM8XXX support for Near Field Communication"
+	depends on MFD_PM8XXX
+	help
+	  Qualcomm PM8XXX chips have a module to support NFC (Near Field
+	  Communication). This option enables the driver to support it.
+
+config PMIC8XXX_UPL
+	tristate "Qualcomm PM8XXX support for User Programmable Logic"
+	depends on MFD_PM8XXX
+	help
+	  This option enables device driver support for User Programmable Logic
+	  on Qualcomm PM8XXX chips. The UPL module provides a means to implement
+	  simple truth table based logic via a set of control registers. I/O may
+	  be routed in and out of the UPL module via GPIO or DTEST pins.
+
+config PMIC8058_XOADC
+	tristate "Qualcomm PM8058 XOADC driver"
+	depends on PMIC8058
+	default n
+	help
+	  Enables user-processor ADC reads over the XOADC module of Qualcomm's
+	  PM8058 PMIC. The driver provides an interface to program the ADC
+	  registers and to read AMUX channels, devices on programmable MPPs,
+	  and XOTHERM.
+
+config TZCOM
+	tristate "Trustzone Communicator driver"
+	default n
+	help
+	  Provides a communication interface between userspace and
+	  TrustZone Operating Environment (TZBSP) using the Secure Channel
+	  Manager (SCM) interface.
+
+config QSEECOM
+	tristate "Qualcomm Secure Execution Communicator driver"
+	help
+	  Provides a communication interface between userspace and
+	  Qualcomm Secure Execution Environment (QSEE) using the Secure Channel
+	  Manager (SCM) interface.
+
+config QFP_FUSE
+	tristate "QFPROM Fuse Read/Write support"
+	help
+	  This option enables a device driver to read/write QFPROM
+	  fuses. The ioctls provide the necessary interface
+	  to the fuse block. Currently this is supported only
+	  on FSM targets.
+
+config USB_HSIC_SMSC_HUB
+	tristate "Support for HSIC based MSM on-chip SMSC3503 HUB"
+	depends on USB_EHCI_MSM_HSIC
+	help
+	  Enables support for the HSIC (High Speed Inter-Chip) based
+	  SMSC3503 hub controller present on Qualcomm chipsets.
+
+	  This adds support for connecting devices such as a mouse in HSIC
+	  host mode.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 89540d1..b628976 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_SENSORS_BH1780)	+= bh1780gli.o
 obj-$(CONFIG_SENSORS_BH1770)	+= bh1770glc.o
 obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
+obj-$(CONFIG_ANDROID_PMEM)	+= pmem.o
 obj-$(CONFIG_SGI_IOC4)		+= ioc4.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
@@ -29,6 +30,7 @@
 obj-$(CONFIG_APDS9802ALS)	+= apds9802als.o
 obj-$(CONFIG_ISL29003)		+= isl29003.o
 obj-$(CONFIG_ISL29020)		+= isl29020.o
+obj-$(CONFIG_USB_HSIC_SMSC_HUB) += smsc_hub.o
 obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
 obj-$(CONFIG_EP93XX_PWM)	+= ep93xx_pwm.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
@@ -52,3 +54,21 @@
 obj-$(CONFIG_MAX8997_MUIC)	+= max8997-muic.o
 obj-$(CONFIG_WL127X_RFKILL)	+= wl127x-rfkill.o
 obj-$(CONFIG_SENSORS_AK8975)	+= akm8975.o
+obj-$(CONFIG_APANIC)		+= apanic.o
+obj-$(CONFIG_TSIF) += msm_tsif.o
+msm_tsif-objs := tsif.o
+obj-$(CONFIG_TSIF_CHRDEV) += tsif_chrdev.o
+obj-$(CONFIG_TSPP) += tspp.o
+obj-$(CONFIG_HAPTIC_ISA1200)		+= isa1200.o
+obj-$(CONFIG_PMIC8058_PWM) += pmic8058-pwm.o
+obj-$(CONFIG_PMIC8XXX_VIBRATOR) += pm8xxx-vibrator.o
+obj-$(CONFIG_PMIC8XXX_NFC) += pm8xxx-nfc.o
+obj-$(CONFIG_PMIC8XXX_UPL) += pm8xxx-upl.o
+obj-$(CONFIG_MSM_MEMORY_LOW_POWER_MODE_SUSPEND_DEEP_POWER_DOWN) \
+	+= msm_migrate_pages.o
+obj-$(CONFIG_PMIC8058_XOADC) += pmic8058-xoadc.o
+obj-$(CONFIG_TZCOM) += tzcom.o
+obj-$(CONFIG_QSEECOM) += qseecom.o
+obj-$(CONFIG_QFP_FUSE) += qfp_fuse.o
diff --git a/drivers/misc/apanic.c b/drivers/misc/apanic.c
new file mode 100644
index 0000000..ca875f8
--- /dev/null
+++ b/drivers/misc/apanic.c
@@ -0,0 +1,606 @@
+/* drivers/misc/apanic.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/wakelock.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/mtd/mtd.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/preempt.h>
+
+extern void ram_console_enable_console(int);
+
+struct panic_header {
+	u32 magic;
+#define PANIC_MAGIC 0xdeadf00d
+
+	u32 version;
+#define PHDR_VERSION   0x01
+
+	u32 console_offset;
+	u32 console_length;
+
+	u32 threads_offset;
+	u32 threads_length;
+};
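+
+/*
+ * On-flash layout produced by apanic() below (offsets are logical and are
+ * remapped around bad blocks by phy_offset()):
+ *
+ *   page 0         : struct panic_header
+ *   console_offset : console log, starting at the second flash page
+ *   threads_offset : thread dump, page-aligned after the console log
+ */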
+
+struct apanic_data {
+	struct mtd_info		*mtd;
+	struct panic_header	curr;
+	void			*bounce;
+	struct proc_dir_entry	*apanic_console;
+	struct proc_dir_entry	*apanic_threads;
+};
+
+static struct apanic_data drv_ctx;
+static struct work_struct proc_removal_work;
+static DEFINE_MUTEX(drv_mutex);
+
+static unsigned int *apanic_bbt;
+static unsigned int apanic_erase_blocks;
+static unsigned int apanic_good_blocks;
+
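+/*
+ * apanic_bbt is a bitmap with one bit per erase block of the panic
+ * partition; a set bit marks the block as bad.  Block n maps to word n/32,
+ * bit n%32 -- e.g. block 37 is bit 5 of apanic_bbt[1].
+ */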
+static void set_bb(unsigned int block, unsigned int *bbt)
+{
+	unsigned int flag = 1;
+
+	BUG_ON(block >= apanic_erase_blocks);
+
+	flag = flag << (block%32);
+	apanic_bbt[block/32] |= flag;
+	apanic_good_blocks--;
+}
+
+static unsigned int get_bb(unsigned int block, unsigned int *bbt)
+{
+	unsigned int flag;
+
+	BUG_ON(block >= apanic_erase_blocks);
+
+	flag = 1 << (block%32);
+	return apanic_bbt[block/32] & flag;
+}
+
+static void alloc_bbt(struct mtd_info *mtd, unsigned int *bbt)
+{
+	int bbt_size;
+	apanic_erase_blocks = (mtd->size)>>(mtd->erasesize_shift);
+	bbt_size = (apanic_erase_blocks+32)/32;
+
+	apanic_bbt = kmalloc(bbt_size*4, GFP_KERNEL);
+	memset(apanic_bbt, 0, bbt_size*4);
+	apanic_good_blocks = apanic_erase_blocks;
+}
+static void scan_bbt(struct mtd_info *mtd, unsigned int *bbt)
+{
+	int i;
+
+	for (i = 0; i < apanic_erase_blocks; i++) {
+		if (mtd->block_isbad(mtd, i*mtd->erasesize))
+			set_bb(i, apanic_bbt);
+	}
+}
+
+#define APANIC_INVALID_OFFSET 0xFFFFFFFF
+
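+/*
+ * Map a logical offset within the panic partition to a physical offset,
+ * skipping erase blocks recorded as bad in apanic_bbt.  For example, if
+ * block 1 is bad, an offset inside logical block 1 resolves to physical
+ * block 2, i.e. the returned offset is one erasesize higher.
+ */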
+static unsigned int phy_offset(struct mtd_info *mtd, unsigned int offset)
+{
+	unsigned int logic_block = offset>>(mtd->erasesize_shift);
+	unsigned int phy_block;
+	unsigned good_block = 0;
+
+	for (phy_block = 0; phy_block < apanic_erase_blocks; phy_block++) {
+		if (!get_bb(phy_block, apanic_bbt))
+			good_block++;
+		if (good_block == (logic_block + 1))
+			break;
+	}
+
+	if (good_block != (logic_block + 1))
+		return APANIC_INVALID_OFFSET;
+
+	return offset + ((phy_block-logic_block)<<mtd->erasesize_shift);
+}
+
+static void apanic_erase_callback(struct erase_info *done)
+{
+	wait_queue_head_t *wait_q = (wait_queue_head_t *) done->priv;
+	wake_up(wait_q);
+}
+
+static int apanic_proc_read(char *buffer, char **start, off_t offset,
+			       int count, int *peof, void *dat)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	size_t file_length;
+	off_t file_offset;
+	unsigned int page_no;
+	off_t page_offset;
+	int rc;
+	size_t len;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&drv_mutex);
+
+	switch ((int) dat) {
+	case 1:	/* apanic_console */
+		file_length = ctx->curr.console_length;
+		file_offset = ctx->curr.console_offset;
+		break;
+	case 2:	/* apanic_threads */
+		file_length = ctx->curr.threads_length;
+		file_offset = ctx->curr.threads_offset;
+		break;
+	default:
+		pr_err("Bad dat (%d)\n", (int) dat);
+		mutex_unlock(&drv_mutex);
+		return -EINVAL;
+	}
+
+	if ((offset + count) > file_length) {
+		mutex_unlock(&drv_mutex);
+		return 0;
+	}
+
+	/* We only support reading a maximum of a flash page */
+	if (count > ctx->mtd->writesize)
+		count = ctx->mtd->writesize;
+
+	page_no = (file_offset + offset) / ctx->mtd->writesize;
+	page_offset = (file_offset + offset) % ctx->mtd->writesize;
+
+
+	if (phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize))
+		== APANIC_INVALID_OFFSET) {
+		pr_err("apanic: reading an invalid address\n");
+		mutex_unlock(&drv_mutex);
+		return -EINVAL;
+	}
+	rc = ctx->mtd->read(ctx->mtd,
+		phy_offset(ctx->mtd, (page_no * ctx->mtd->writesize)),
+		ctx->mtd->writesize,
+		&len, ctx->bounce);
+
+	if (page_offset)
+		count -= page_offset;
+	memcpy(buffer, ctx->bounce + page_offset, count);
+
+	*start = count;
+
+	if ((offset + count) == file_length)
+		*peof = 1;
+
+	mutex_unlock(&drv_mutex);
+	return count;
+}
+
+static void mtd_panic_erase(void)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	struct erase_info erase;
+	DECLARE_WAITQUEUE(wait, current);
+	wait_queue_head_t wait_q;
+	int rc, i;
+
+	init_waitqueue_head(&wait_q);
+	erase.mtd = ctx->mtd;
+	erase.callback = apanic_erase_callback;
+	erase.len = ctx->mtd->erasesize;
+	erase.priv = (u_long)&wait_q;
+	for (i = 0; i < ctx->mtd->size; i += ctx->mtd->erasesize) {
+		erase.addr = i;
+		set_current_state(TASK_INTERRUPTIBLE);
+		add_wait_queue(&wait_q, &wait);
+
+		if (get_bb(erase.addr>>ctx->mtd->erasesize_shift, apanic_bbt)) {
+			printk(KERN_WARNING
+			       "apanic: Skipping erase of bad "
+			       "block @%llx\n", erase.addr);
+			set_current_state(TASK_RUNNING);
+			remove_wait_queue(&wait_q, &wait);
+			continue;
+		}
+
+		rc = ctx->mtd->erase(ctx->mtd, &erase);
+		if (rc) {
+			set_current_state(TASK_RUNNING);
+			remove_wait_queue(&wait_q, &wait);
+			printk(KERN_ERR
+			       "apanic: Erase of 0x%llx, 0x%llx failed\n",
+			       (unsigned long long) erase.addr,
+			       (unsigned long long) erase.len);
+			if (rc == -EIO) {
+				if (ctx->mtd->block_markbad(ctx->mtd,
+							    erase.addr)) {
+					printk(KERN_ERR
+					       "apanic: Err marking blk bad\n");
+					goto out;
+				}
+				printk(KERN_INFO
+				       "apanic: Marked a bad block"
+				       " @%llx\n", erase.addr);
+				set_bb(erase.addr>>ctx->mtd->erasesize_shift,
+					apanic_bbt);
+				continue;
+			}
+			goto out;
+		}
+		schedule();
+		remove_wait_queue(&wait_q, &wait);
+	}
+	printk(KERN_DEBUG "apanic: %s partition erased\n",
+	       CONFIG_APANIC_PLABEL);
+out:
+	return;
+}
+
+static void apanic_remove_proc_work(struct work_struct *work)
+{
+	struct apanic_data *ctx = &drv_ctx;
+
+	mutex_lock(&drv_mutex);
+	mtd_panic_erase();
+	memset(&ctx->curr, 0, sizeof(struct panic_header));
+	if (ctx->apanic_console) {
+		remove_proc_entry("apanic_console", NULL);
+		ctx->apanic_console = NULL;
+	}
+	if (ctx->apanic_threads) {
+		remove_proc_entry("apanic_threads", NULL);
+		ctx->apanic_threads = NULL;
+	}
+	mutex_unlock(&drv_mutex);
+}
+
+static int apanic_proc_write(struct file *file, const char __user *buffer,
+				unsigned long count, void *data)
+{
+	schedule_work(&proc_removal_work);
+	return count;
+}
+
+static void mtd_panic_notify_add(struct mtd_info *mtd)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	struct panic_header *hdr = ctx->bounce;
+	size_t len;
+	int rc;
+	int    proc_entry_created = 0;
+
+	if (strcmp(mtd->name, CONFIG_APANIC_PLABEL))
+		return;
+
+	ctx->mtd = mtd;
+
+	alloc_bbt(mtd, apanic_bbt);
+	scan_bbt(mtd, apanic_bbt);
+
+	if (apanic_good_blocks == 0) {
+		printk(KERN_ERR "apanic: no good blocks?!\n");
+		goto out_err;
+	}
+
+	rc = mtd->read(mtd, phy_offset(mtd, 0), mtd->writesize,
+			&len, ctx->bounce);
+	if (rc && rc == -EBADMSG) {
+		printk(KERN_WARNING
+		       "apanic: Bad ECC on block 0 (ignored)\n");
+	} else if (rc && rc != -EUCLEAN) {
+		printk(KERN_ERR "apanic: Error reading block 0 (%d)\n", rc);
+		goto out_err;
+	}
+
+	if (len != mtd->writesize) {
+		printk(KERN_ERR "apanic: Bad read size (%d)\n", rc);
+		goto out_err;
+	}
+
+	printk(KERN_INFO "apanic: Bound to mtd partition '%s'\n", mtd->name);
+
+	if (hdr->magic != PANIC_MAGIC) {
+		printk(KERN_INFO "apanic: No panic data available\n");
+		mtd_panic_erase();
+		return;
+	}
+
+	if (hdr->version != PHDR_VERSION) {
+		printk(KERN_INFO "apanic: Version mismatch (%d != %d)\n",
+		       hdr->version, PHDR_VERSION);
+		mtd_panic_erase();
+		return;
+	}
+
+	memcpy(&ctx->curr, hdr, sizeof(struct panic_header));
+
+	printk(KERN_INFO "apanic: c(%u, %u) t(%u, %u)\n",
+	       hdr->console_offset, hdr->console_length,
+	       hdr->threads_offset, hdr->threads_length);
+
+	if (hdr->console_length) {
+		ctx->apanic_console = create_proc_entry("apanic_console",
+						      S_IFREG | S_IRUGO, NULL);
+		if (!ctx->apanic_console)
+			printk(KERN_ERR "%s: failed creating procfile\n",
+			       __func__);
+		else {
+			ctx->apanic_console->read_proc = apanic_proc_read;
+			ctx->apanic_console->write_proc = apanic_proc_write;
+			ctx->apanic_console->size = hdr->console_length;
+			ctx->apanic_console->data = (void *) 1;
+			proc_entry_created = 1;
+		}
+	}
+
+	if (hdr->threads_length) {
+		ctx->apanic_threads = create_proc_entry("apanic_threads",
+						       S_IFREG | S_IRUGO, NULL);
+		if (!ctx->apanic_threads)
+			printk(KERN_ERR "%s: failed creating procfile\n",
+			       __func__);
+		else {
+			ctx->apanic_threads->read_proc = apanic_proc_read;
+			ctx->apanic_threads->write_proc = apanic_proc_write;
+			ctx->apanic_threads->size = hdr->threads_length;
+			ctx->apanic_threads->data = (void *) 2;
+			proc_entry_created = 1;
+		}
+	}
+
+	if (!proc_entry_created)
+		mtd_panic_erase();
+
+	return;
+out_err:
+	ctx->mtd = NULL;
+}
+
+static void mtd_panic_notify_remove(struct mtd_info *mtd)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	if (mtd == ctx->mtd) {
+		ctx->mtd = NULL;
+		printk(KERN_INFO "apanic: Unbound from %s\n", mtd->name);
+	}
+}
+
+static struct mtd_notifier mtd_panic_notifier = {
+	.add	= mtd_panic_notify_add,
+	.remove	= mtd_panic_notify_remove,
+};
+
+static int in_panic = 0;
+
+static int apanic_writeflashpage(struct mtd_info *mtd, loff_t to,
+				 const u_char *buf)
+{
+	int rc;
+	size_t wlen;
+	int panic = in_interrupt() | in_atomic();
+
+	if (panic && !mtd->panic_write) {
+		printk(KERN_EMERG "%s: No panic_write available\n", __func__);
+		return 0;
+	} else if (!panic && !mtd->write) {
+		printk(KERN_EMERG "%s: No write available\n", __func__);
+		return 0;
+	}
+
+	to = phy_offset(mtd, to);
+	if (to == APANIC_INVALID_OFFSET) {
+		printk(KERN_EMERG "apanic: write to invalid address\n");
+		return 0;
+	}
+
+	if (panic)
+		rc = mtd->panic_write(mtd, to, mtd->writesize, &wlen, buf);
+	else
+		rc = mtd->write(mtd, to, mtd->writesize, &wlen, buf);
+
+	if (rc) {
+		printk(KERN_EMERG
+		       "%s: Error writing data to flash (%d)\n",
+		       __func__, rc);
+		return rc;
+	}
+
+	return wlen;
+}
+
+extern int log_buf_copy(char *dest, int idx, int len);
+extern void log_buf_clear(void);
+
+/*
+ * Writes the contents of the console to the specified offset in flash.
+ * Returns number of bytes written
+ */
+static int apanic_write_console(struct mtd_info *mtd, unsigned int off)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	int saved_oip;
+	int idx = 0;
+	int rc, rc2;
+	unsigned int last_chunk = 0;
+
+	while (!last_chunk) {
+		saved_oip = oops_in_progress;
+		oops_in_progress = 1;
+		rc = log_buf_copy(ctx->bounce, idx, mtd->writesize);
+		if (rc < 0)
+			break;
+
+		if (rc != mtd->writesize)
+			last_chunk = rc;
+
+		oops_in_progress = saved_oip;
+		if (rc <= 0)
+			break;
+		if (rc != mtd->writesize)
+			memset(ctx->bounce + rc, 0, mtd->writesize - rc);
+
+		rc2 = apanic_writeflashpage(mtd, off, ctx->bounce);
+		if (rc2 <= 0) {
+			printk(KERN_EMERG
+			       "apanic: Flash write failed (%d)\n", rc2);
+			return idx;
+		}
+		if (!last_chunk)
+			idx += rc2;
+		else
+			idx += last_chunk;
+		off += rc2;
+	}
+	return idx;
+}
+
+static int apanic(struct notifier_block *this, unsigned long event,
+			void *ptr)
+{
+	struct apanic_data *ctx = &drv_ctx;
+	struct panic_header *hdr = (struct panic_header *) ctx->bounce;
+	int console_offset = 0;
+	int console_len = 0;
+	int threads_offset = 0;
+	int threads_len = 0;
+	int rc;
+
+	if (in_panic)
+		return NOTIFY_DONE;
+	in_panic = 1;
+#ifdef CONFIG_PREEMPT
+	/* Ensure that cond_resched() won't try to preempt anybody */
+	add_preempt_count(PREEMPT_ACTIVE);
+#endif
+	touch_softlockup_watchdog();
+
+	if (!ctx->mtd)
+		goto out;
+
+	if (ctx->curr.magic) {
+		printk(KERN_EMERG "Crash partition in use!\n");
+		goto out;
+	}
+	console_offset = ctx->mtd->writesize;
+
+	/*
+	 * Write out the console
+	 */
+	console_len = apanic_write_console(ctx->mtd, console_offset);
+	if (console_len < 0) {
+		printk(KERN_EMERG "Error writing console to panic log! (%d)\n",
+		       console_len);
+		console_len = 0;
+	}
+
+	/*
+	 * Write out all threads
+	 */
+	threads_offset = ALIGN(console_offset + console_len,
+			       ctx->mtd->writesize);
+	if (!threads_offset)
+		threads_offset = ctx->mtd->writesize;
+
+	ram_console_enable_console(0);
+
+	log_buf_clear();
+	show_state_filter(0);
+	threads_len = apanic_write_console(ctx->mtd, threads_offset);
+	if (threads_len < 0) {
+		printk(KERN_EMERG "Error writing threads to panic log! (%d)\n",
+		       threads_len);
+		threads_len = 0;
+	}
+
+	/*
+	 * Finally write the panic header
+	 */
+	memset(ctx->bounce, 0, PAGE_SIZE);
+	hdr->magic = PANIC_MAGIC;
+	hdr->version = PHDR_VERSION;
+
+	hdr->console_offset = console_offset;
+	hdr->console_length = console_len;
+
+	hdr->threads_offset = threads_offset;
+	hdr->threads_length = threads_len;
+
+	rc = apanic_writeflashpage(ctx->mtd, 0, ctx->bounce);
+	if (rc <= 0) {
+		printk(KERN_EMERG "apanic: Header write failed (%d)\n",
+		       rc);
+		goto out;
+	}
+
+	printk(KERN_EMERG "apanic: Panic dump successfully written to flash\n");
+
+ out:
+#ifdef CONFIG_PREEMPT
+	sub_preempt_count(PREEMPT_ACTIVE);
+#endif
+	in_panic = 0;
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_blk = {
+	.notifier_call	= apanic,
+};
+
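+/*
+ * debugfs test hooks: reading the "apanic" debugfs file calls apanic()
+ * directly to force a dump, while writing to it triggers BUG() so the real
+ * panic path can be exercised.
+ */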
+static int panic_dbg_get(void *data, u64 *val)
+{
+	apanic(NULL, 0, NULL);
+	return 0;
+}
+
+static int panic_dbg_set(void *data, u64 val)
+{
+	BUG();
+	return -1;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(panic_dbg_fops, panic_dbg_get, panic_dbg_set, "%llu\n");
+
+int __init apanic_init(void)
+{
+	register_mtd_user(&mtd_panic_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+	debugfs_create_file("apanic", 0644, NULL, NULL, &panic_dbg_fops);
+	memset(&drv_ctx, 0, sizeof(drv_ctx));
+	drv_ctx.bounce = (void *) __get_free_page(GFP_KERNEL);
+	INIT_WORK(&proc_removal_work, apanic_remove_proc_work);
+	printk(KERN_INFO "Android kernel panic handler initialized (bind=%s)\n",
+	       CONFIG_APANIC_PLABEL);
+	return 0;
+}
+
+module_init(apanic_init);
diff --git a/drivers/misc/isa1200.c b/drivers/misc/isa1200.c
new file mode 100644
index 0000000..555dfdd
--- /dev/null
+++ b/drivers/misc/isa1200.c
@@ -0,0 +1,716 @@
+/*
+ *  isa1200.c - Haptic Motor
+ *
+ *  Copyright (C) 2009 Samsung Electronics
+ *  Kyungmin Park <kyungmin.park@samsung.com>
+ *  Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/pwm.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c/isa1200.h>
+#include "../staging/android/timed_output.h"
+
+#define ISA1200_HCTRL0		0x30
+#define ISA1200_HCTRL1		0x31
+#define ISA1200_HCTRL5		0x35
+
+#define ISA1200_HCTRL0_RESET	0x01
+#define ISA1200_HCTRL1_RESET	0x4B
+
+#define ISA1200_HCTRL5_VIB_STRT	0xD5
+#define ISA1200_HCTRL5_VIB_STOP	0x6B
+#define ISA1200_POWER_DOWN_MASK 0x7F
+
+struct isa1200_chip {
+	struct i2c_client *client;
+	struct isa1200_platform_data *pdata;
+	struct pwm_device *pwm;
+	struct hrtimer timer;
+	struct timed_output_dev dev;
+	struct work_struct work;
+	spinlock_t lock;
+	unsigned int enable;
+	unsigned int period_ns;
+	bool is_len_gpio_valid;
+	struct regulator **regs;
+	bool clk_on;
+	u8 hctrl0_val;
+};
+
+static int isa1200_read_reg(struct i2c_client *client, int reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static int isa1200_write_reg(struct i2c_client *client, int reg, u8 value)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, value);
+	if (ret < 0)
+		dev_err(&client->dev, "%s: err %d\n", __func__, ret);
+
+	return ret;
+}
+
+static void isa1200_vib_set(struct isa1200_chip *haptic, int enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		/* if hen and len are separate then enable hen
+		 * otherwise set normal mode bit */
+		if (haptic->is_len_gpio_valid == true)
+			gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1);
+		else {
+			rc = isa1200_write_reg(haptic->client, ISA1200_HCTRL0,
+				haptic->hctrl0_val | ~ISA1200_POWER_DOWN_MASK);
+			if (rc < 0) {
+				pr_err("%s: i2c write failure\n", __func__);
+				return;
+			}
+		}
+
+		if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+			int period_us = haptic->period_ns / 1000;
+
+			rc = pwm_config(haptic->pwm,
+				(period_us * haptic->pdata->duty) / 100,
+				period_us);
+			if (rc < 0) {
+				pr_err("%s: pwm_config fail\n", __func__);
+				goto chip_dwn;
+			}
+
+			rc = pwm_enable(haptic->pwm);
+			if (rc < 0) {
+				pr_err("%s: pwm_enable fail\n", __func__);
+				goto chip_dwn;
+			}
+		} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+			/* vote for clock */
+			if (haptic->pdata->clk_enable && !haptic->clk_on) {
+				rc = haptic->pdata->clk_enable(true);
+				if (rc < 0) {
+					pr_err("%s: clk enable failed\n",
+								__func__);
+					goto chip_dwn;
+				}
+				haptic->clk_on = true;
+			}
+
+			rc = isa1200_write_reg(haptic->client,
+						ISA1200_HCTRL5,
+						ISA1200_HCTRL5_VIB_STRT);
+			if (rc < 0) {
+				pr_err("%s: start vibration fail\n", __func__);
+				goto dis_clk;
+			}
+		}
+	} else {
+		/* if hen and len are separate then pull down hen
+		 * otherwise set power down bit */
+		if (haptic->is_len_gpio_valid == true)
+			gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+		else {
+			rc = isa1200_write_reg(haptic->client, ISA1200_HCTRL0,
+				haptic->hctrl0_val & ISA1200_POWER_DOWN_MASK);
+			if (rc < 0) {
+				pr_err("%s: i2c write failure\n", __func__);
+				return;
+			}
+		}
+
+		if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+			pwm_disable(haptic->pwm);
+		} else if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+			rc = isa1200_write_reg(haptic->client,
+						ISA1200_HCTRL5,
+						ISA1200_HCTRL5_VIB_STOP);
+			if (rc < 0)
+				pr_err("%s: stop vibration fail\n", __func__);
+
+			/* de-vote clock */
+			if (haptic->pdata->clk_enable && haptic->clk_on) {
+				rc = haptic->pdata->clk_enable(false);
+				if (rc < 0) {
+					pr_err("%s: clk disable failed\n",
+								__func__);
+					return;
+				}
+				haptic->clk_on = false;
+			}
+		}
+	}
+
+	return;
+
+dis_clk:
+	if (haptic->pdata->clk_enable && haptic->clk_on) {
+		rc = haptic->pdata->clk_enable(false);
+		if (rc < 0) {
+			pr_err("%s: clk disable failed\n", __func__);
+			return;
+		}
+		haptic->clk_on = false;
+	}
+chip_dwn:
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	else {
+		rc = isa1200_write_reg(haptic->client, ISA1200_HCTRL0,
+			haptic->hctrl0_val & ISA1200_POWER_DOWN_MASK);
+		if (rc < 0) {
+			pr_err("%s: i2c write failure\n", __func__);
+			return;
+		}
+	}
+}
+
+static void isa1200_chip_work(struct work_struct *work)
+{
+	struct isa1200_chip *haptic;
+
+	haptic = container_of(work, struct isa1200_chip, work);
+	isa1200_vib_set(haptic, haptic->enable);
+}
+
+static void isa1200_chip_enable(struct timed_output_dev *dev, int value)
+{
+	struct isa1200_chip *haptic = container_of(dev, struct isa1200_chip,
+					dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&haptic->lock, flags);
+	hrtimer_cancel(&haptic->timer);
+	if (value == 0)
+		haptic->enable = 0;
+	else {
+		value = (value > haptic->pdata->max_timeout ?
+				haptic->pdata->max_timeout : value);
+		haptic->enable = 1;
+		hrtimer_start(&haptic->timer,
+			ktime_set(value / 1000, (value % 1000) * 1000000),
+			HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&haptic->lock, flags);
+	schedule_work(&haptic->work);
+}
+
+static int isa1200_chip_get_time(struct timed_output_dev *dev)
+{
+	struct isa1200_chip *haptic = container_of(dev, struct isa1200_chip,
+					dev);
+
+	if (hrtimer_active(&haptic->timer)) {
+		ktime_t r = hrtimer_get_remaining(&haptic->timer);
+		struct timeval t = ktime_to_timeval(r);
+		return t.tv_sec * 1000 + t.tv_usec / 1000;
+	} else
+		return 0;
+}
+
+static enum hrtimer_restart isa1200_vib_timer_func(struct hrtimer *timer)
+{
+	struct isa1200_chip *haptic = container_of(timer, struct isa1200_chip,
+					timer);
+	haptic->enable = 0;
+	schedule_work(&haptic->work);
+
+	return HRTIMER_NORESTART;
+}
+
+static void dump_isa1200_reg(char *str, struct i2c_client *client)
+{
+	pr_debug("%s reg0x%x=0x%x, reg0x%x=0x%x, reg0x%x=0x%x\n", str,
+		ISA1200_HCTRL0, isa1200_read_reg(client, ISA1200_HCTRL0),
+		ISA1200_HCTRL1, isa1200_read_reg(client, ISA1200_HCTRL1),
+		ISA1200_HCTRL5, isa1200_read_reg(client, ISA1200_HCTRL5));
+}
+
+static int isa1200_setup(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+	int temp, rc;
+	u8 value;
+
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_len_gpio, 0);
+
+	udelay(250);
+
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 1);
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_len_gpio, 1);
+
+	value =	(haptic->pdata->smart_en << 3) |
+		(haptic->pdata->is_erm << 5) |
+		(haptic->pdata->ext_clk_en << 7);
+
+	rc = isa1200_write_reg(client, ISA1200_HCTRL1, value);
+	if (rc < 0) {
+		pr_err("%s: i2c write failure\n", __func__);
+		goto reset_gpios;
+	}
+
+	if (haptic->pdata->mode_ctrl == PWM_GEN_MODE) {
+		temp = haptic->pdata->pwm_fd.pwm_div;
+		if (temp < 128 || temp > 1024 || temp % 128) {
+			pr_err("%s: Invalid divider\n", __func__);
+			goto reset_hctrl1;
+		}
+		value = ((temp >> 7) - 1);
+	} else if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+		temp = haptic->pdata->pwm_fd.pwm_freq;
+		if (temp < 22400 || temp > 172600 || temp % 22400) {
+			pr_err("%s: Invalid frequency\n", __func__);
+			goto reset_hctrl1;
+		}
+		value = ((temp / 22400) - 1);
+		haptic->period_ns = NSEC_PER_SEC / temp;
+	}
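+	/*
+	 * The register value computed above encodes the clock setting: in
+	 * PWM_GEN_MODE it is pwm_div/128 - 1 (pwm_div 128..1024), in
+	 * PWM_INPUT_MODE it is pwm_freq/22400 - 1, e.g. a 44800 Hz PWM input
+	 * gives a value of 1 and a period_ns of ~22321 ns.
+	 */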
+
+	value |= (haptic->pdata->mode_ctrl << 3) |
+		(haptic->pdata->overdrive_high << 5) |
+		(haptic->pdata->overdrive_en << 5) |
+		(haptic->pdata->chip_en << 7);
+
+	rc = isa1200_write_reg(client, ISA1200_HCTRL0, value);
+	if (rc < 0) {
+		pr_err("%s: i2c write failure\n", __func__);
+		goto reset_hctrl1;
+	}
+
+	/* if hen and len are separate then pull down hen
+	 * otherwise set power down bit */
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	else {
+		rc = isa1200_write_reg(client, ISA1200_HCTRL0,
+					value & ISA1200_POWER_DOWN_MASK);
+		if (rc < 0) {
+			pr_err("%s: i2c write failure\n", __func__);
+			goto reset_hctrl1;
+		}
+	}
+
+	haptic->hctrl0_val = value;
+	dump_isa1200_reg("new:", client);
+	return 0;
+
+reset_hctrl1:
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+				ISA1200_HCTRL1_RESET);
+reset_gpios:
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_len_gpio, 0);
+	return rc;
+}
+
+static int isa1200_reg_power(struct isa1200_chip *haptic, bool on)
+{
+	const struct isa1200_regulator *reg_info =
+				haptic->pdata->regulator_info;
+	u8 i, num_reg = haptic->pdata->num_regulators;
+	int rc;
+
+	for (i = 0; i < num_reg; i++) {
+		rc = regulator_set_optimum_mode(haptic->regs[i],
+					on ? reg_info[i].load_uA : 0);
+		if (rc < 0) {
+			pr_err("%s: regulator_set_optimum_mode failed(%d)\n",
+							__func__, rc);
+			goto regs_fail;
+		}
+
+		rc = on ? regulator_enable(haptic->regs[i]) :
+			regulator_disable(haptic->regs[i]);
+		if (rc < 0) {
+			pr_err("%s: regulator %sable fail %d\n", __func__,
+					on ? "en" : "dis", rc);
+			regulator_set_optimum_mode(haptic->regs[i],
+					!on ? reg_info[i].load_uA : 0);
+			goto regs_fail;
+		}
+	}
+
+	return 0;
+
+regs_fail:
+	while (i--) {
+		regulator_set_optimum_mode(haptic->regs[i],
+				!on ? reg_info[i].load_uA : 0);
+		!on ? regulator_enable(haptic->regs[i]) :
+			regulator_disable(haptic->regs[i]);
+	}
+	return rc;
+}
+
+static int isa1200_reg_setup(struct isa1200_chip *haptic, bool on)
+{
+	const struct isa1200_regulator *reg_info =
+				haptic->pdata->regulator_info;
+	u8 i, num_reg = haptic->pdata->num_regulators;
+	int rc = 0;
+
+	/* put regulators */
+	if (on == false) {
+		i = num_reg;
+		goto put_regs;
+	}
+
+	haptic->regs = kzalloc(num_reg * sizeof(struct regulator *),
+							GFP_KERNEL);
+	if (!haptic->regs) {
+		pr_err("unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < num_reg; i++) {
+		haptic->regs[i] = regulator_get(&haptic->client->dev,
+							reg_info[i].name);
+		if (IS_ERR(haptic->regs[i])) {
+			rc = PTR_ERR(haptic->regs[i]);
+			pr_err("%s:regulator get failed(%d)\n",	__func__, rc);
+			goto put_regs;
+		}
+
+		if (regulator_count_voltages(haptic->regs[i]) > 0) {
+			rc = regulator_set_voltage(haptic->regs[i],
+				reg_info[i].min_uV, reg_info[i].max_uV);
+			if (rc) {
+				pr_err("%s: regulator_set_voltage failed(%d)\n",
+								__func__, rc);
+				regulator_put(haptic->regs[i]);
+				goto put_regs;
+			}
+		}
+	}
+
+	return rc;
+
+put_regs:
+	while (i--) {
+		if (regulator_count_voltages(haptic->regs[i]) > 0)
+			regulator_set_voltage(haptic->regs[i], 0,
+						reg_info[i].max_uV);
+		regulator_put(haptic->regs[i]);
+	}
+	kfree(haptic->regs);
+	return rc;
+}
+
+static int __devinit isa1200_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct isa1200_chip *haptic;
+	struct isa1200_platform_data *pdata;
+	int ret;
+
+	if (!i2c_check_functionality(client->adapter,
+			I2C_FUNC_SMBUS_BYTE_DATA)) {
+		dev_err(&client->dev, "%s: no support for i2c read/write "
+				"byte data\n", __func__);
+		return -EIO;
+	}
+
+	pdata = client->dev.platform_data;
+	if (!pdata) {
+		dev_err(&client->dev, "%s: no platform data\n", __func__);
+		return -EINVAL;
+	}
+
+	if (pdata->dev_setup) {
+		ret = pdata->dev_setup(true);
+		if (ret < 0) {
+			dev_err(&client->dev, "dev setup failed\n");
+			return -EINVAL;
+		}
+	}
+
+	haptic = kzalloc(sizeof(struct isa1200_chip), GFP_KERNEL);
+	if (!haptic) {
+		ret = -ENOMEM;
+		goto mem_alloc_fail;
+	}
+	haptic->client = client;
+	haptic->enable = 0;
+	haptic->pdata = pdata;
+
+	if (pdata->regulator_info) {
+		ret = isa1200_reg_setup(haptic, true);
+		if (ret) {
+			dev_err(&client->dev, "%s: regulator setup failed\n",
+							__func__);
+			goto reg_setup_fail;
+		}
+
+		ret = isa1200_reg_power(haptic, true);
+		if (ret) {
+			dev_err(&client->dev, "%s: regulator power failed\n",
+							__func__);
+			goto reg_pwr_fail;
+		}
+	}
+
+	if (pdata->power_on) {
+		ret = pdata->power_on(1);
+		if (ret) {
+			dev_err(&client->dev, "%s: power-up failed\n",
+							__func__);
+			goto pwr_up_fail;
+		}
+	}
+
+	spin_lock_init(&haptic->lock);
+	INIT_WORK(&haptic->work, isa1200_chip_work);
+	haptic->clk_on = false;
+
+	hrtimer_init(&haptic->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	haptic->timer.function = isa1200_vib_timer_func;
+
+	/*register with timed output class*/
+	haptic->dev.name = pdata->name;
+	haptic->dev.get_time = isa1200_chip_get_time;
+	haptic->dev.enable = isa1200_chip_enable;
+	ret = timed_output_dev_register(&haptic->dev);
+	if (ret < 0)
+		goto timed_reg_fail;
+
+	i2c_set_clientdata(client, haptic);
+
+	ret = gpio_is_valid(pdata->hap_en_gpio);
+	if (ret) {
+		ret = gpio_request(pdata->hap_en_gpio, "haptic_en_gpio");
+		if (ret) {
+			dev_err(&client->dev, "%s: gpio %d request failed\n",
+					__func__, pdata->hap_en_gpio);
+			goto hen_gpio_fail;
+		}
+	} else {
+		dev_err(&client->dev, "%s: Invalid gpio %d\n", __func__,
+					pdata->hap_en_gpio);
+		goto hen_gpio_fail;
+	}
+
+	haptic->is_len_gpio_valid = true;
+	ret = gpio_is_valid(haptic->pdata->hap_len_gpio);
+	if (ret) {
+		ret = gpio_request(pdata->hap_len_gpio,
+					"haptic_ldo_gpio");
+		if (ret) {
+			dev_err(&client->dev,
+				"%s: gpio %d request failed\n",
+				__func__, pdata->hap_len_gpio);
+			goto len_gpio_fail;
+		}
+	} else {
+		dev_err(&client->dev, "%s: gpio is not used/Invalid %d\n",
+					__func__, pdata->hap_len_gpio);
+		haptic->is_len_gpio_valid = false;
+	}
+
+	ret = isa1200_setup(client);
+	if (ret) {
+		dev_err(&client->dev, "%s: setup fail %d\n", __func__, ret);
+		goto setup_fail;
+	}
+
+	if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE) {
+		haptic->pwm = pwm_request(pdata->pwm_ch_id, id->name);
+		if (IS_ERR(haptic->pwm)) {
+			dev_err(&client->dev, "%s: pwm request failed\n",
+							__func__);
+			ret = PTR_ERR(haptic->pwm);
+			goto reset_hctrl0;
+		}
+	}
+
+	printk(KERN_INFO "%s: %s registered\n", __func__, id->name);
+	return 0;
+
+reset_hctrl0:
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_len_gpio, 0);
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+				ISA1200_HCTRL1_RESET);
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL0,
+					ISA1200_HCTRL0_RESET);
+setup_fail:
+	if (haptic->is_len_gpio_valid == true)
+		gpio_free(pdata->hap_len_gpio);
+len_gpio_fail:
+	gpio_free(pdata->hap_en_gpio);
+hen_gpio_fail:
+	timed_output_dev_unregister(&haptic->dev);
+timed_reg_fail:
+	if (pdata->power_on)
+		pdata->power_on(0);
+pwr_up_fail:
+	if (pdata->regulator_info)
+		isa1200_reg_power(haptic, false);
+reg_pwr_fail:
+	if (pdata->regulator_info)
+		isa1200_reg_setup(haptic, false);
+reg_setup_fail:
+	kfree(haptic);
+mem_alloc_fail:
+	if (pdata->dev_setup)
+		pdata->dev_setup(false);
+	return ret;
+}
+
+static int __devexit isa1200_remove(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+
+	hrtimer_cancel(&haptic->timer);
+	cancel_work_sync(&haptic->work);
+
+	/* turn-off current vibration */
+	isa1200_vib_set(haptic, 0);
+
+	if (haptic->pdata->mode_ctrl == PWM_INPUT_MODE)
+		pwm_free(haptic->pwm);
+
+	timed_output_dev_unregister(&haptic->dev);
+
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_len_gpio, 0);
+
+	gpio_free(haptic->pdata->hap_en_gpio);
+	if (haptic->is_len_gpio_valid == true)
+		gpio_free(haptic->pdata->hap_len_gpio);
+
+	/* reset hardware registers */
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL0,
+				ISA1200_HCTRL0_RESET);
+	i2c_smbus_write_byte_data(client, ISA1200_HCTRL1,
+				ISA1200_HCTRL1_RESET);
+
+
+	/* power-off the chip */
+	if (haptic->pdata->regulator_info) {
+		isa1200_reg_power(haptic, false);
+		isa1200_reg_setup(haptic, false);
+	}
+
+	if (haptic->pdata->power_on)
+		haptic->pdata->power_on(0);
+
+	if (haptic->pdata->dev_setup)
+		haptic->pdata->dev_setup(false);
+
+	kfree(haptic);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int isa1200_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+	int ret;
+
+	hrtimer_cancel(&haptic->timer);
+	cancel_work_sync(&haptic->work);
+	/* turn-off current vibration */
+	isa1200_vib_set(haptic, 0);
+
+	gpio_set_value_cansleep(haptic->pdata->hap_en_gpio, 0);
+	if (haptic->is_len_gpio_valid == true)
+		gpio_set_value_cansleep(haptic->pdata->hap_len_gpio, 0);
+
+	if (haptic->pdata->regulator_info)
+		isa1200_reg_power(haptic, false);
+
+	if (haptic->pdata->power_on) {
+		ret = haptic->pdata->power_on(0);
+		if (ret) {
+			dev_err(&client->dev, "power-down failed\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int isa1200_resume(struct i2c_client *client)
+{
+	struct isa1200_chip *haptic = i2c_get_clientdata(client);
+	int ret;
+
+	if (haptic->pdata->regulator_info)
+		isa1200_reg_power(haptic, true);
+
+	if (haptic->pdata->power_on) {
+		ret = haptic->pdata->power_on(1);
+		if (ret) {
+			dev_err(&client->dev, "power-up failed\n");
+			return ret;
+		}
+	}
+
+	isa1200_setup(client);
+	return 0;
+}
+#else
+#define isa1200_suspend		NULL
+#define isa1200_resume		NULL
+#endif
+
+static const struct i2c_device_id isa1200_id[] = {
+	{ "isa1200_1", 0 },
+	{ },
+};
+MODULE_DEVICE_TABLE(i2c, isa1200_id);
+
+static struct i2c_driver isa1200_driver = {
+	.driver	= {
+		.name	= "isa1200",
+	},
+	.probe		= isa1200_probe,
+	.remove		= __devexit_p(isa1200_remove),
+	.suspend	= isa1200_suspend,
+	.resume		= isa1200_resume,
+	.id_table	= isa1200_id,
+};
+
+static int __init isa1200_init(void)
+{
+	return i2c_add_driver(&isa1200_driver);
+}
+
+static void __exit isa1200_exit(void)
+{
+	i2c_del_driver(&isa1200_driver);
+}
+
+module_init(isa1200_init);
+module_exit(isa1200_exit);
+
+MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
+MODULE_DESCRIPTION("ISA1200 Haptic Motor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/msm_migrate_pages.c b/drivers/misc/msm_migrate_pages.c
new file mode 100644
index 0000000..6dcdd02
--- /dev/null
+++ b/drivers/misc/msm_migrate_pages.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2010, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
+#include <linux/module.h>
+#include <mach/msm_migrate_pages.h>
+
+static unsigned long unstable_memory_state;
+
+unsigned long get_msm_migrate_pages_status(void)
+{
+	return unstable_memory_state;
+}
+EXPORT_SYMBOL(get_msm_migrate_pages_status);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int migrate_pages_callback(struct notifier_block *self,
+				unsigned long action, void *arg)
+{
+	int ret = 0;
+
+	switch (action) {
+	case MEM_ONLINE:
+		unstable_memory_state = action;
+		break;
+	case MEM_OFFLINE:
+		unstable_memory_state = action;
+		break;
+	case MEM_GOING_OFFLINE:
+	case MEM_GOING_ONLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	}
+	return ret;
+}
+#endif
+
+static int __devinit msm_migrate_pages_probe(struct platform_device *pdev)
+{
+#ifdef CONFIG_MEMORY_HOTPLUG
+	hotplug_memory_notifier(migrate_pages_callback, 0);
+#endif
+	unstable_memory_state = 0;
+	return 0;
+}
+
+static struct platform_driver msm_migrate_pages_driver = {
+	.probe = msm_migrate_pages_probe,
+	.driver = {
+		.name = "msm_migrate_pages",
+	},
+};
+
+static int __init msm_migrate_pages_init(void)
+{
+	return platform_driver_register(&msm_migrate_pages_driver);
+}
+
+static void __exit msm_migrate_pages_exit(void)
+{
+	platform_driver_unregister(&msm_migrate_pages_driver);
+}
+
+module_init(msm_migrate_pages_init);
+module_exit(msm_migrate_pages_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Get Status of Unstable Memory Region");
diff --git a/drivers/misc/pm8xxx-nfc.c b/drivers/misc/pm8xxx-nfc.c
new file mode 100644
index 0000000..1aaa3e3
--- /dev/null
+++ b/drivers/misc/pm8xxx-nfc.c
@@ -0,0 +1,311 @@
+/* Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8XXX NFC driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/nfc.h>
+
+/* PM8XXX NFC */
+#define SSBI_REG_NFC_CTRL	0x14D
+#define SSBI_REG_NFC_TEST	0x14E
+
+/* NFC_CTRL */
+#define PM8XXX_NFC_SUPPORT_EN		0x80
+#define PM8XXX_NFC_LDO_EN		0x40
+#define PM8XXX_NFC_EN			0x20
+#define PM8XXX_NFC_EXT_VDDLDO_EN	0x10
+#define PM8XXX_NFC_VPH_PWR_EN		0x08
+#define PM8XXX_NFC_RESERVED		0x04
+#define PM8XXX_NFC_VDDLDO_LEVEL		0x03
+
+/* NFC_TEST */
+#define PM8XXX_NFC_VDDLDO_MON_EN	0x80
+#define PM8XXX_NFC_ATEST_EN		0x40
+#define PM8XXX_NFC_DTEST1_EN		0x20
+#define PM8XXX_NFC_RESERVED2		0x18
+#define PM8XXX_NFC_VDDLDO_OK_S		0x04
+#define PM8XXX_NFC_MBG_EN_S		0x02
+#define PM8XXX_NFC_EXT_EN_S		0x01
+
+struct pm8xxx_nfc_device {
+	struct device *dev;
+	struct mutex		nfc_mutex;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry		*dent;
+#endif
+};
+static struct pm8xxx_nfc_device	*nfc_dev;
+
+/* APIs */
+/*
+ * pm8xxx_nfc_request - request a handle to access NFC device
+ */
+struct pm8xxx_nfc_device *pm8xxx_nfc_request(void)
+{
+	return nfc_dev;
+}
+EXPORT_SYMBOL(pm8xxx_nfc_request);
+
+/*
+ * pm8xxx_nfc_config - configure NFC signals
+ *
+ * @nfcdev: the NFC device
+ * @mask: signal mask to configure
+ * @flags: control flags
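+ *
+ * Bits 0-7 of @mask/@flags address the NFC_CTRL register and bits 8-15 the
+ * NFC_TEST register; PM_NFC_CTRL_REQ and/or PM_NFC_TEST_REQ must also be
+ * set in @mask for the corresponding register to be updated.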
+ */
+int pm8xxx_nfc_config(struct pm8xxx_nfc_device *nfcdev, u32 mask, u32 flags)
+{
+	u8	nfc_ctrl, nfc_test, m, f;
+	int	rc;
+
+	if (nfcdev == NULL || IS_ERR(nfcdev) || !mask)
+		return -EINVAL;
+
+	mutex_lock(&nfcdev->nfc_mutex);
+
+	if (!(mask & PM_NFC_CTRL_REQ))
+		goto config_test;
+
+	rc = pm8xxx_readb(nfcdev->dev->parent, SSBI_REG_NFC_CTRL, &nfc_ctrl);
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_readb(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto config_done;
+	}
+
+	m = mask & 0x00ff;
+	f = flags & 0x00ff;
+	nfc_ctrl &= ~m;
+	nfc_ctrl |= m & f;
+
+	rc = pm8xxx_writeb(nfcdev->dev->parent, SSBI_REG_NFC_CTRL, nfc_ctrl);
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_writeb(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto config_done;
+	}
+
+config_test:
+	if (!(mask & PM_NFC_TEST_REQ))
+		goto config_done;
+
+	rc = pm8xxx_readb(nfcdev->dev->parent, SSBI_REG_NFC_TEST, &nfc_test);
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_readb(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+		goto config_done;
+	}
+
+	m = (mask >> 8) & 0x00ff;
+	f = (flags >> 8) & 0x00ff;
+	nfc_test &= ~m;
+	nfc_test |= m & f;
+
+	rc = pm8xxx_writeb(nfcdev->dev->parent, SSBI_REG_NFC_TEST, nfc_test);
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_writeb(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+		goto config_done;
+	}
+
+config_done:
+	mutex_unlock(&nfcdev->nfc_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pm8xxx_nfc_config);
+
+/*
+ * pm8xxx_nfc_get_status - get NFC status
+ *
+ * @nfcdev: the NFC device
+ * @mask: of status mask to read
+ * @status: pointer to the status variable
+ */
+int pm8xxx_nfc_get_status(struct pm8xxx_nfc_device *nfcdev,
+			  u32 mask, u32 *status)
+{
+	u8	nfc_ctrl, nfc_test;
+	u32	st;
+	int	rc;
+
+	if (nfcdev == NULL || IS_ERR(nfcdev) || status == NULL)
+		return -EINVAL;
+
+	st = 0;
+	mutex_lock(&nfcdev->nfc_mutex);
+
+	if (!(mask & PM_NFC_CTRL_REQ))
+		goto read_test;
+
+	rc = pm8xxx_readb(nfcdev->dev->parent, SSBI_REG_NFC_CTRL, &nfc_ctrl);
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_readb(): rc=%d (nfc_ctrl=0x%x)\n",
+		       __func__, rc, nfc_ctrl);
+		goto get_status_done;
+	}
+
+read_test:
+	if (!(mask & (PM_NFC_TEST_REQ | PM_NFC_TEST_STATUS)))
+		goto get_status_done;
+
+	rc = pm8xxx_readb(nfcdev->dev->parent, SSBI_REG_NFC_TEST, &nfc_test);
+	if (rc)
+		pr_err("%s: FAIL pm8xxx_readb(): rc=%d (nfc_test=0x%x)\n",
+		       __func__, rc, nfc_test);
+
+get_status_done:
+	st = nfc_ctrl;
+	st |= nfc_test << 8;
+	*status = st;
+
+	mutex_unlock(&nfcdev->nfc_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pm8xxx_nfc_get_status);
+
+/*
+ * pm8xxx_nfc_free - free the NFC device
+ */
+void pm8xxx_nfc_free(struct pm8xxx_nfc_device *nfcdev)
+{
+	/* Disable all signals */
+	pm8xxx_nfc_config(nfcdev, PM_NFC_CTRL_REQ, 0);
+}
+EXPORT_SYMBOL(pm8xxx_nfc_free);
+
+#if defined(CONFIG_DEBUG_FS)
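+/*
+ * debugfs "pm8xxx-nfc" file: writes take the form 0xMMMMCCCC (mask in the
+ * upper 16 bits, control flags in the lower 16); reads return
+ * NFC_CTRL | (NFC_TEST << 8).
+ */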
+static int pm8xxx_nfc_debug_set(void *data, u64 val)
+{
+	struct pm8xxx_nfc_device *nfcdev;
+	u32	mask, control;
+	int	rc;
+
+	nfcdev = (struct pm8xxx_nfc_device *)data;
+	control = (u32)val & 0xffff;
+	mask = ((u32)val >> 16) & 0xffff;
+	rc = pm8xxx_nfc_config(nfcdev, mask, control);
+	if (rc)
+		pr_err("%s: ERR pm8xxx_nfc_config: rc=%d, "
+		       "[mask, control]=[0x%x, 0x%x]\n",
+		       __func__, rc, mask, control);
+
+	return 0;
+}
+
+static int pm8xxx_nfc_debug_get(void *data, u64 *val)
+{
+	struct pm8xxx_nfc_device *nfcdev;
+	u32	status;
+	int	rc;
+
+	nfcdev = (struct pm8xxx_nfc_device *)data;
+	rc = pm8xxx_nfc_get_status(nfcdev, (u32)-1, &status);
+	if (rc)
+		pr_err("%s: ERR pm8xxx_nfc_get_status: rc=%d, status=0x%x\n",
+		       __func__, rc, status);
+
+	if (val)
+		*val = (u64)status;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pm8xxx_nfc_fops, pm8xxx_nfc_debug_get,
+			pm8xxx_nfc_debug_set, "%llu\n");
+
+static int pm8xxx_nfc_debug_init(struct pm8xxx_nfc_device *nfcdev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_file("pm8xxx-nfc", 0644, NULL,
+				   (void *)nfcdev, &pm8xxx_nfc_fops);
+
+	if (dent == NULL || IS_ERR(dent))
+		pr_err("%s: ERR debugfs_create_file: dent=0x%x\n",
+		       __func__, (unsigned)dent);
+
+	nfcdev->dent = dent;
+	return 0;
+}
+#endif
+
+static int __devinit pm8xxx_nfc_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_nfc_device	*nfcdev;
+
+	nfcdev = kzalloc(sizeof *nfcdev, GFP_KERNEL);
+	if (nfcdev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&nfcdev->nfc_mutex);
+
+	nfcdev->dev = &pdev->dev;
+	nfc_dev = nfcdev;
+	platform_set_drvdata(pdev, nfcdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8xxx_nfc_debug_init(nfc_dev);
+#endif
+
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pm8xxx_nfc_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_nfc_device *nfcdev = platform_get_drvdata(pdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	debugfs_remove(nfcdev->dent);
+#endif
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(nfcdev);
+	return 0;
+}
+
+static struct platform_driver pm8xxx_nfc_driver = {
+	.probe		= pm8xxx_nfc_probe,
+	.remove		= __devexit_p(pm8xxx_nfc_remove),
+	.driver		= {
+		.name = PM8XXX_NFC_DEV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_nfc_init(void)
+{
+	return platform_driver_register(&pm8xxx_nfc_driver);
+}
+
+static void __exit pm8xxx_nfc_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_nfc_driver);
+}
+
+module_init(pm8xxx_nfc_init);
+module_exit(pm8xxx_nfc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PM8XXX NFC driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_NFC_DEV_NAME);
diff --git a/drivers/misc/pm8xxx-upl.c b/drivers/misc/pm8xxx-upl.c
new file mode 100644
index 0000000..d3d9746f
--- /dev/null
+++ b/drivers/misc/pm8xxx-upl.c
@@ -0,0 +1,350 @@
+/* Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PM8XXX UPL driver
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/upl.h>
+
+/* PMIC8XXX UPL registers */
+#define SSBI_REG_UPL_CTRL		0x17B
+#define SSBI_REG_UPL_TRUTHTABLE1	0x17C
+#define SSBI_REG_UPL_TRUTHTABLE2	0x17D
+
+struct pm8xxx_upl_device {
+	struct device		*dev;
+	struct mutex		upl_mutex;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry		*dent;
+#endif
+};
+static struct pm8xxx_upl_device *upl_dev;
+
+/* APIs */
+
+/*
+ * pm8xxx_upl_request - request a handle to access UPL device
+ */
+struct pm8xxx_upl_device *pm8xxx_upl_request(void)
+{
+	return upl_dev;
+}
+EXPORT_SYMBOL(pm8xxx_upl_request);
+
+/*
+ * pm8xxx_upl_read_truthtable - read value currently stored in UPL truth table
+ *
+ * @upldev: the UPL device
+ * @truthtable: value read from UPL truth table
+ */
+int pm8xxx_upl_read_truthtable(struct pm8xxx_upl_device *upldev,
+				u16 *truthtable)
+{
+	int rc = 0;
+	u8 table[2];
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE1,
+							&(table[0]));
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_readb(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE1, table[0], rc);
+		goto upl_read_done;
+	}
+
+	rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE2,
+							&(table[1]));
+	if (rc)
+		pr_err("%s: FAIL pm8xxx_readb(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE2, table[1], rc);
+upl_read_done:
+	mutex_unlock(&upldev->upl_mutex);
+	*truthtable = (((u16)table[1]) << 8) | table[0];
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_upl_read_truthtable);
+
+/*
+ * pm8xxx_upl_write_truthtable - write value into UPL truth table
+ *
+ * @upldev: the UPL device
+ * @truthtable: value written to UPL truth table
+ *
+ * Each bit in parameter "truthtable" corresponds to the UPL output for a given
+ * set of input pin values. For example, if the input pins have the following
+ * values: A=1, B=1, C=1, D=0, then the UPL would output the value of bit 14
+ * (0b1110) in parameter "truthtable".
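+ * As a further example, a "truthtable" value of 0x8000 (only bit 15, i.e.
+ * A=B=C=D=1, set) makes the UPL behave as a four-input AND gate.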
+ */
+int pm8xxx_upl_write_truthtable(struct pm8xxx_upl_device *upldev,
+				u16 truthtable)
+{
+	int rc = 0;
+	u8 table[2];
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+
+	table[0] = truthtable & 0xFF;
+	table[1] = (truthtable >> 8) & 0xFF;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8xxx_writeb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE1,
+								table[0]);
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_writeb(0x%X)=0x%04X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE1, table[0], rc);
+		goto upl_write_done;
+	}
+
+	rc = pm8xxx_writeb(upldev->dev->parent, SSBI_REG_UPL_TRUTHTABLE2,
+								table[1]);
+	if (rc)
+		pr_err("%s: FAIL pm8xxx_writeb(0x%X)=0x%04X: rc=%d\n",
+			__func__, SSBI_REG_UPL_TRUTHTABLE2, table[1], rc);
+upl_write_done:
+	mutex_unlock(&upldev->upl_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_upl_write_truthtable);
+
+/*
+ * pm8xxx_upl_config - configure UPL I/O settings and UPL enable/disable
+ *
+ * @upldev: the UPL device
+ * @mask: setting mask to configure
+ * @flags: setting flags
+ */
+int pm8xxx_upl_config(struct pm8xxx_upl_device *upldev, u32 mask, u32 flags)
+{
+	int rc;
+	u8 upl_ctrl, m, f;
+
+	if (upldev == NULL || IS_ERR(upldev))
+		return -EINVAL;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_CTRL, &upl_ctrl);
+	if (rc) {
+		pr_err("%s: FAIL pm8xxx_readb(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc);
+		goto upl_config_done;
+	}
+
+	m = mask & 0x00ff;
+	f = flags & 0x00ff;
+	upl_ctrl &= ~m;
+	upl_ctrl |= m & f;
+
+	rc = pm8xxx_writeb(upldev->dev->parent, SSBI_REG_UPL_CTRL, upl_ctrl);
+	if (rc)
+		pr_err("%s: FAIL pm8xxx_writeb(0x%X)=0x%02X: rc=%d\n",
+			__func__, SSBI_REG_UPL_CTRL, upl_ctrl, rc);
+upl_config_done:
+	mutex_unlock(&upldev->upl_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_upl_config);
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int truthtable_set(void *data, u64 val)
+{
+	int rc;
+
+	rc = pm8xxx_upl_write_truthtable(data, val);
+	if (rc)
+		pr_err("%s: pm8xxx_upl_write_truthtable: rc=%d, "
+			"truthtable=0x%llX\n", __func__, rc, val);
+	return rc;
+}
+
+static int truthtable_get(void *data, u64 *val)
+{
+	int rc;
+	u16 truthtable;
+
+	rc = pm8xxx_upl_read_truthtable(data, &truthtable);
+	if (rc)
+		pr_err("%s: pm8xxx_upl_read_truthtable: rc=%d, "
+			"truthtable=0x%X\n", __func__, rc, truthtable);
+	if (val)
+		*val = truthtable;
+
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(upl_truthtable_fops, truthtable_get,
+			truthtable_set, "0x%04llX\n");
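+
+/*
+ * Illustration: writing 0x8000 to the "truthtable" debugfs file programs
+ * the UPL to drive its output high only when all four inputs are high,
+ * since only bit 15 (inputs 0b1111 per the mapping described above) is set.
+ */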
+
+/* enter values as 0xMMMMFFFF where MMMM is the mask and FFFF is the flags;
+ * only the low byte of each field is applied to the 8-bit control register */
+static int control_set(void *data, u64 val)
+{
+	u8 mask, flags;
+	int rc;
+
+	flags = val & 0xFFFF;
+	mask = (val >> 16) & 0xFFFF;
+
+	rc = pm8xxx_upl_config(data, mask, flags);
+	if (rc)
+		pr_err("%s: pm8xxx_upl_config: rc=%d, mask = 0x%X, "
+			"flags = 0x%X\n", __func__, rc, mask, flags);
+	return rc;
+}
+
+static int control_get(void *data, u64 *val)
+{
+	struct pm8xxx_upl_device *upldev;
+	int rc = 0;
+	u8 ctrl;
+
+	upldev = data;
+
+	mutex_lock(&upldev->upl_mutex);
+
+	rc = pm8xxx_readb(upldev->dev->parent, SSBI_REG_UPL_CTRL, &ctrl);
+	if (rc)
+		pr_err("%s: FAIL pm8xxx_readb(): rc=%d (ctrl=0x%02X)\n",
+		       __func__, rc, ctrl);
+
+	mutex_unlock(&upldev->upl_mutex);
+
+	*val = ctrl;
+
+	return rc;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(upl_control_fops, control_get,
+			control_set, "0x%02llX\n");
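+
+/*
+ * Illustration: writing 0x000F0005 to the "control" debugfs file applies
+ * mask 0x0F with flags 0x05, so bits 1 and 3 of SSBI_REG_UPL_CTRL end up
+ * cleared, bits 0 and 2 set, and bits outside the mask are left untouched.
+ */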
+
+static int pm8xxx_upl_debug_init(struct pm8xxx_upl_device *upldev)
+{
+	struct dentry *dent;
+	struct dentry *temp;
+
+	dent = debugfs_create_dir("pm8xxx-upl", NULL);
+	if (dent == NULL || IS_ERR(dent)) {
+		pr_err("%s: ERR debugfs_create_dir: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		return -ENOMEM;
+	}
+
+	temp = debugfs_create_file("truthtable", S_IRUSR | S_IWUSR, dent,
+					upldev, &upl_truthtable_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		goto debug_error;
+	}
+
+	temp = debugfs_create_file("control", S_IRUSR | S_IWUSR, dent,
+					upldev, &upl_control_fops);
+	if (temp == NULL || IS_ERR(temp)) {
+		pr_err("%s: ERR debugfs_create_file: dent=0x%X\n",
+					__func__, (unsigned)dent);
+		goto debug_error;
+	}
+
+	upldev->dent = dent;
+	return 0;
+
+debug_error:
+	debugfs_remove_recursive(dent);
+	return -ENOMEM;
+}
+
+static int __devexit pm8xxx_upl_debug_remove(struct pm8xxx_upl_device *upldev)
+{
+	debugfs_remove_recursive(upldev->dent);
+	return 0;
+}
+
+#endif /* CONFIG_DEBUG_FS */
+
+static int __devinit pm8xxx_upl_probe(struct platform_device *pdev)
+{
+	struct pm8xxx_upl_device	*upldev;
+
+	upldev = kzalloc(sizeof *upldev, GFP_KERNEL);
+	if (upldev == NULL) {
+		pr_err("%s: kzalloc() failed.\n", __func__);
+		return -ENOMEM;
+	}
+
+	mutex_init(&upldev->upl_mutex);
+
+	upl_dev = upldev;
+	upldev->dev = &pdev->dev;
+	platform_set_drvdata(pdev, upldev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8xxx_upl_debug_init(upl_dev);
+#endif
+	pr_notice("%s: OK\n", __func__);
+	return 0;
+}
+
+static int __devexit pm8xxx_upl_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_upl_device *upldev = platform_get_drvdata(pdev);
+
+#if defined(CONFIG_DEBUG_FS)
+	pm8xxx_upl_debug_remove(upldev);
+#endif
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(upldev);
+	pr_notice("%s: OK\n", __func__);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_upl_driver = {
+	.probe		= pm8xxx_upl_probe,
+	.remove		= __devexit_p(pm8xxx_upl_remove),
+	.driver		= {
+		.name = PM8XXX_UPL_DEV_NAME,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8xxx_upl_init(void)
+{
+	return platform_driver_register(&pm8xxx_upl_driver);
+}
+
+static void __exit pm8xxx_upl_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_upl_driver);
+}
+
+module_init(pm8xxx_upl_init);
+module_exit(pm8xxx_upl_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PM8XXX UPL driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:" PM8XXX_UPL_DEV_NAME);
diff --git a/drivers/misc/pm8xxx-vibrator.c b/drivers/misc/pm8xxx-vibrator.c
new file mode 100644
index 0000000..62e7b45
--- /dev/null
+++ b/drivers/misc/pm8xxx-vibrator.c
@@ -0,0 +1,325 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pm8xxx/vibrator.h>
+
+#include "../staging/android/timed_output.h"
+
+#define VIB_DRV			0x4A
+
+#define VIB_DRV_SEL_MASK	0xf8
+#define VIB_DRV_SEL_SHIFT	0x03
+#define VIB_DRV_EN_MANUAL_MASK	0xfc
+#define VIB_DRV_LOGIC_SHIFT	0x2
+
+#define VIB_MAX_LEVEL_mV	3100
+#define VIB_MIN_LEVEL_mV	1200
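+
+/*
+ * VIB_DRV layout as used by this driver: bits 7..3 hold the drive level in
+ * 100 mV steps (e.g. 3100 mV -> 31), bit 2 selects active-low logic, and
+ * the low bits carry the enable mode OR'd in from the caller's config.
+ */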
+
+struct pm8xxx_vib {
+	struct hrtimer vib_timer;
+	struct timed_output_dev timed_dev;
+	spinlock_t lock;
+	struct work_struct work;
+	struct device *dev;
+	const struct pm8xxx_vibrator_platform_data *pdata;
+	int state;
+	int level;
+	u8  reg_vib_drv;
+};
+
+static struct pm8xxx_vib *vib_dev;
+
+int pm8xxx_vibrator_config(struct pm8xxx_vib_config *vib_config)
+{
+	u8 reg = 0;
+	int rc;
+
+	if (vib_dev == NULL) {
+		pr_err("%s: vib_dev is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (vib_config->drive_mV) {
+		if ((vib_config->drive_mV < VIB_MIN_LEVEL_mV) ||
+			(vib_config->drive_mV > VIB_MAX_LEVEL_mV)) {
+			pr_err("Invalid vibrator drive strength\n");
+			return -EINVAL;
+		}
+	}
+
+	reg = (vib_config->drive_mV / 100) << VIB_DRV_SEL_SHIFT;
+
+	reg |= (!!vib_config->active_low) << VIB_DRV_LOGIC_SHIFT;
+
+	reg |= vib_config->enable_mode;
+
+	rc = pm8xxx_writeb(vib_dev->dev->parent, VIB_DRV, reg);
+	if (rc)
+		pr_err("%s: pm8xxx write failed: rc=%d\n", __func__, rc);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8xxx_vibrator_config);
+
+/* REVISIT: just for debugging, will be removed in final working version */
+static void __dump_vib_regs(struct pm8xxx_vib *vib, char *msg)
+{
+	u8 temp;
+
+	dev_dbg(vib->dev, "%s\n", msg);
+
+	pm8xxx_readb(vib->dev->parent, VIB_DRV, &temp);
+	dev_dbg(vib->dev, "VIB_DRV - %X\n", temp);
+}
+
+static int pm8xxx_vib_read_u8(struct pm8xxx_vib *vib,
+				 u8 *data, u16 reg)
+{
+	int rc;
+
+	rc = pm8xxx_readb(vib->dev->parent, reg, data);
+	if (rc < 0)
+		dev_warn(vib->dev, "Error reading pm8xxx: %X - ret %X\n",
+				reg, rc);
+
+	return rc;
+}
+
+static int pm8xxx_vib_write_u8(struct pm8xxx_vib *vib,
+				 u8 data, u16 reg)
+{
+	int rc;
+
+	rc = pm8xxx_writeb(vib->dev->parent, reg, data);
+	if (rc < 0)
+		dev_warn(vib->dev, "Error writing pm8xxx: %X - ret %X\n",
+				reg, rc);
+	return rc;
+}
+
+static int pm8xxx_vib_set(struct pm8xxx_vib *vib, int on)
+{
+	int rc;
+	u8 val;
+
+	if (on) {
+		val = vib->reg_vib_drv;
+		val |= ((vib->level << VIB_DRV_SEL_SHIFT) & VIB_DRV_SEL_MASK);
+		rc = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+	} else {
+		val = vib->reg_vib_drv;
+		val &= ~VIB_DRV_SEL_MASK;
+		rc = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+		if (rc < 0)
+			return rc;
+		vib->reg_vib_drv = val;
+	}
+	__dump_vib_regs(vib, "vib_set_end");
+
+	return rc;
+}
+
+static void pm8xxx_vib_enable(struct timed_output_dev *dev, int value)
+{
+	struct pm8xxx_vib *vib = container_of(dev, struct pm8xxx_vib,
+					 timed_dev);
+	unsigned long flags;
+
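+	/*
+	 * hrtimer_try_to_cancel() fails while the timer callback is running;
+	 * since the callback only schedules the work item, drop the lock,
+	 * relax and retry until the cancel succeeds before (re)arming.
+	 */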
+retry:
+	spin_lock_irqsave(&vib->lock, flags);
+	if (hrtimer_try_to_cancel(&vib->vib_timer) < 0) {
+		spin_unlock_irqrestore(&vib->lock, flags);
+		cpu_relax();
+		goto retry;
+	}
+
+	if (value == 0)
+		vib->state = 0;
+	else {
+		value = (value > vib->pdata->max_timeout_ms ?
+				 vib->pdata->max_timeout_ms : value);
+		vib->state = 1;
+		hrtimer_start(&vib->vib_timer,
+			      ktime_set(value / 1000, (value % 1000) * 1000000),
+			      HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&vib->lock, flags);
+	schedule_work(&vib->work);
+}
+
+static void pm8xxx_vib_update(struct work_struct *work)
+{
+	struct pm8xxx_vib *vib = container_of(work, struct pm8xxx_vib,
+					 work);
+
+	pm8xxx_vib_set(vib, vib->state);
+}
+
+static int pm8xxx_vib_get_time(struct timed_output_dev *dev)
+{
+	struct pm8xxx_vib *vib = container_of(dev, struct pm8xxx_vib,
+							 timed_dev);
+
+	if (hrtimer_active(&vib->vib_timer)) {
+		ktime_t r = hrtimer_get_remaining(&vib->vib_timer);
+		return (int)ktime_to_us(r);
+	} else
+		return 0;
+}
+
+static enum hrtimer_restart pm8xxx_vib_timer_func(struct hrtimer *timer)
+{
+	struct pm8xxx_vib *vib = container_of(timer, struct pm8xxx_vib,
+							 vib_timer);
+
+	vib->state = 0;
+	schedule_work(&vib->work);
+
+	return HRTIMER_NORESTART;
+}
+
+#ifdef CONFIG_PM
+static int pm8xxx_vib_suspend(struct device *dev)
+{
+	struct pm8xxx_vib *vib = dev_get_drvdata(dev);
+
+	hrtimer_cancel(&vib->vib_timer);
+	cancel_work_sync(&vib->work);
+	/* turn-off vibrator */
+	pm8xxx_vib_set(vib, 0);
+
+	return 0;
+}
+
+static const struct dev_pm_ops pm8xxx_vib_pm_ops = {
+	.suspend = pm8xxx_vib_suspend,
+};
+#endif
+
+static int __devinit pm8xxx_vib_probe(struct platform_device *pdev)
+{
+	const struct pm8xxx_vibrator_platform_data *pdata =
+						pdev->dev.platform_data;
+	struct pm8xxx_vib *vib;
+	u8 val;
+	int rc;
+
+	if (!pdata)
+		return -EINVAL;
+
+	if (pdata->level_mV < VIB_MIN_LEVEL_mV ||
+			 pdata->level_mV > VIB_MAX_LEVEL_mV)
+		return -EINVAL;
+
+	vib = kzalloc(sizeof(*vib), GFP_KERNEL);
+	if (!vib)
+		return -ENOMEM;
+
+	vib->pdata	= pdata;
+	vib->level	= pdata->level_mV / 100;
+	vib->dev	= &pdev->dev;
+
+	spin_lock_init(&vib->lock);
+	INIT_WORK(&vib->work, pm8xxx_vib_update);
+
+	hrtimer_init(&vib->vib_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	vib->vib_timer.function = pm8xxx_vib_timer_func;
+
+	vib->timed_dev.name = "vibrator";
+	vib->timed_dev.get_time = pm8xxx_vib_get_time;
+	vib->timed_dev.enable = pm8xxx_vib_enable;
+
+	__dump_vib_regs(vib, "boot_vib_default");
+
+	/*
+	 * Configure the vibrator; it operates in manual mode
+	 * for the timed_output framework.
+	 */
+	rc = pm8xxx_vib_read_u8(vib, &val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+	val &= ~VIB_DRV_EN_MANUAL_MASK;
+	rc = pm8xxx_vib_write_u8(vib, val, VIB_DRV);
+	if (rc < 0)
+		goto err_read_vib;
+
+	vib->reg_vib_drv = val;
+
+	rc = timed_output_dev_register(&vib->timed_dev);
+	if (rc < 0)
+		goto err_read_vib;
+
+	pm8xxx_vib_enable(&vib->timed_dev, pdata->initial_vibrate_ms);
+
+	platform_set_drvdata(pdev, vib);
+
+	vib_dev = vib;
+
+	return 0;
+
+err_read_vib:
+	kfree(vib);
+	return rc;
+}
+
+static int __devexit pm8xxx_vib_remove(struct platform_device *pdev)
+{
+	struct pm8xxx_vib *vib = platform_get_drvdata(pdev);
+
+	cancel_work_sync(&vib->work);
+	hrtimer_cancel(&vib->vib_timer);
+	timed_output_dev_unregister(&vib->timed_dev);
+	platform_set_drvdata(pdev, NULL);
+	kfree(vib);
+
+	return 0;
+}
+
+static struct platform_driver pm8xxx_vib_driver = {
+	.probe		= pm8xxx_vib_probe,
+	.remove		= __devexit_p(pm8xxx_vib_remove),
+	.driver		= {
+		.name	= PM8XXX_VIBRATOR_DEV_NAME,
+		.owner	= THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm	= &pm8xxx_vib_pm_ops,
+#endif
+	},
+};
+
+static int __init pm8xxx_vib_init(void)
+{
+	return platform_driver_register(&pm8xxx_vib_driver);
+}
+module_init(pm8xxx_vib_init);
+
+static void __exit pm8xxx_vib_exit(void)
+{
+	platform_driver_unregister(&pm8xxx_vib_driver);
+}
+module_exit(pm8xxx_vib_exit);
+
+MODULE_ALIAS("platform:" PM8XXX_VIBRATOR_DEV_NAME);
+MODULE_DESCRIPTION("pm8xxx vibrator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c
new file mode 100644
index 0000000..0ab5b69
--- /dev/null
+++ b/drivers/misc/pmem.c
@@ -0,0 +1,2961 @@
+/* drivers/android/pmem.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/fmem.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/android_pmem.h>
+#include <linux/mempolicy.h>
+#include <linux/sched.h>
+#include <linux/kobject.h>
+#include <linux/pm_runtime.h>
+#include <linux/memory_alloc.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/mm_types.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+#include <asm/mach/map.h>
+#include <asm/page.h>
+
+#define PMEM_MAX_DEVICES (10)
+
+#define PMEM_MAX_ORDER (128)
+#define PMEM_MIN_ALLOC PAGE_SIZE
+
+#define PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS (64)
+
+#define PMEM_32BIT_WORD_ORDER (5)
+#define PMEM_BITS_PER_WORD_MASK (BITS_PER_LONG - 1)
+
+#ifdef CONFIG_ANDROID_PMEM_DEBUG
+#define PMEM_DEBUG 1
+#else
+#define PMEM_DEBUG 0
+#endif
+
+#define SYSTEM_ALLOC_RETRY 10
+
+/* indicates that a reference to this file has been taken via get_pmem_file,
+ * the file should not be released until put_pmem_file is called */
+#define PMEM_FLAGS_BUSY 0x1
+/* indicates that this is a suballocation of a larger master range */
+#define PMEM_FLAGS_CONNECTED (0x1 << 1)
+/* indicates this is a master and not a sub allocation and that it is mmaped */
+#define PMEM_FLAGS_MASTERMAP (0x1 << 2)
+/* submap and unsubmap flags indicate:
+ * 00: subregion has never been mmaped
+ * 10: subregion has been mmaped, reference to the mm was taken
+ * 11: subregion has been released, reference to the mm still held
+ * 01: subregion has been released, reference to the mm has been released
+ */
+#define PMEM_FLAGS_SUBMAP (0x1 << 3)
+#define PMEM_FLAGS_UNSUBMAP (0x1 << 4)
+
+struct pmem_data {
+	/* in alloc mode: an index into the bitmap
+	 * in no_alloc mode: the size of the allocation */
+	int index;
+	/* see flags above for descriptions */
+	unsigned int flags;
+	/* protects this data field; if the mm's mmap_sem will be held at the
+	 * same time as this sem, the mmap_sem must be taken first (as this
+	 * is the order for the vma_open and vma_close ops) */
+	struct rw_semaphore sem;
+	/* info about the mmapping process */
+	struct vm_area_struct *vma;
+	/* task struct of the mapping process */
+	struct task_struct *task;
+	/* process id of the mapping process */
+	pid_t pid;
+	/* file descriptor of the master */
+	int master_fd;
+	/* file struct of the master */
+	struct file *master_file;
+	/* a list of currently available regions if this is a suballocation */
+	struct list_head region_list;
+	/* a linked list of data so we can access them for debugging */
+	struct list_head list;
+#if PMEM_DEBUG
+	int ref;
+#endif
+};
+
+struct pmem_bits {
+	unsigned allocated:1;		/* 1 if allocated, 0 if free */
+	unsigned order:7;		/* size of the region in pmem space */
+};
+
+struct pmem_region_node {
+	struct pmem_region region;
+	struct list_head list;
+};
+
+#define PMEM_DEBUG_MSGS 0
+#if PMEM_DEBUG_MSGS
+#define DLOG(fmt,args...) \
+	do { pr_debug("[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
+		    ##args); } \
+	while (0)
+#else
+#define DLOG(x...) do {} while (0)
+#endif
+
+enum pmem_align {
+	PMEM_ALIGN_4K,
+	PMEM_ALIGN_1M,
+};
+
+#define PMEM_NAME_SIZE 16
+
+struct alloc_list {
+	void *addr;                  /* physical addr of allocation */
+	void *aaddr;                 /* aligned physical addr       */
+	unsigned int size;           /* total size of allocation    */
+	unsigned char __iomem *vaddr; /* Virtual addr                */
+	struct list_head allocs;
+};
+
+struct pmem_info {
+	struct miscdevice dev;
+	/* physical start address of the remapped pmem space */
+	unsigned long base;
+	/* virtual start address of the remapped pmem space */
+	unsigned char __iomem *vbase;
+	/* total size of the pmem space */
+	unsigned long size;
+	/* number of entries in the pmem space */
+	unsigned long num_entries;
+	/* pfn of the garbage page in memory */
+	unsigned long garbage_pfn;
+	/* which memory type (e.g. SMI, EBI1) this PMEM device is backed by */
+	unsigned memory_type;
+
+	char name[PMEM_NAME_SIZE];
+
+	/* index of the garbage page in the pmem space */
+	int garbage_index;
+	/* reserved virtual address range */
+	struct vm_struct *area;
+
+	enum pmem_allocator_type allocator_type;
+
+	int (*allocate)(const int,
+			const unsigned long,
+			const unsigned int);
+	int (*free)(int, int);
+	int (*free_space)(int, struct pmem_freespace *);
+	unsigned long (*len)(int, struct pmem_data *);
+	unsigned long (*start_addr)(int, struct pmem_data *);
+
+	/* actual size of memory element, e.g.: (4 << 10) is 4K */
+	unsigned int quantum;
+
+	/* indicates maps of this region should be cached, if a mix of
+	 * cached and uncached is desired, set this and open the device with
+	 * O_SYNC to get an uncached region */
+	unsigned cached;
+	unsigned buffered;
+	union {
+		struct {
+			/* in all_or_nothing allocator mode the first mapper
+			 * gets the whole space and sets this flag */
+			unsigned allocated;
+		} all_or_nothing;
+
+		struct {
+			/* the buddy allocator bitmap for the region
+			 * indicating which entries are allocated and which
+			 * are free.
+			 */
+
+			struct pmem_bits *buddy_bitmap;
+		} buddy_bestfit;
+
+		struct {
+			unsigned int bitmap_free; /* # of zero bits/quanta */
+			uint32_t *bitmap;
+			int32_t bitmap_allocs;
+			struct {
+				short bit;
+				unsigned short quanta;
+			} *bitm_alloc;
+		} bitmap;
+
+		struct {
+			unsigned long used;      /* Bytes currently allocated */
+			struct list_head alist;  /* List of allocations       */
+		} system_mem;
+	} allocator;
+
+	int id;
+	struct kobject kobj;
+
+	/* for debugging, creates a list of pmem file structs, the
+	 * data_list_mutex should be taken before pmem_data->sem if both are
+	 * needed */
+	struct mutex data_list_mutex;
+	struct list_head data_list;
+	/* arena_mutex protects the global allocation arena
+	 *
+	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
+	 * down(pmem_data->sem) => mutex_lock(arena_mutex)
+	 */
+	struct mutex arena_mutex;
+
+	long (*ioctl)(struct file *, unsigned int, unsigned long);
+	int (*release)(struct inode *, struct file *);
+	/* reference count of allocations */
+	atomic_t allocation_cnt;
+	/*
+	 * request function for a region when the allocation count goes
+	 * from 0 -> 1
+	 */
+	int (*mem_request)(void *);
+	/*
+	 * release function for a region when the allocation count goes
+	 * from 1 -> 0
+	 */
+	int (*mem_release)(void *);
+	/*
+	 * private data for the request/release callback
+	 */
+	void *region_data;
+	/*
+	 * map and unmap as needed
+	 */
+	int map_on_demand;
+	/*
+	 * memory will be reused through fmem
+	 */
+	int reusable;
+};
+#define to_pmem_info_id(a) (container_of(a, struct pmem_info, kobj)->id)
+
+static void ioremap_pmem(int id);
+static void pmem_put_region(int id);
+static int pmem_get_region(int id);
+
+static struct pmem_info pmem[PMEM_MAX_DEVICES];
+static int id_count;
+
+#define PMEM_SYSFS_DIR_NAME "pmem_regions" /* under /sys/kernel/ */
+static struct kset *pmem_kset;
+
+#define PMEM_IS_FREE_BUDDY(id, index) \
+	(!(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].allocated))
+#define PMEM_BUDDY_ORDER(id, index) \
+	(pmem[id].allocator.buddy_bestfit.buddy_bitmap[index].order)
+#define PMEM_BUDDY_INDEX(id, index) \
+	(index ^ (1 << PMEM_BUDDY_ORDER(id, index)))
+#define PMEM_BUDDY_NEXT_INDEX(id, index) \
+	(index + (1 << PMEM_BUDDY_ORDER(id, index)))
+#define PMEM_OFFSET(index) (index * pmem[id].quantum)
+#define PMEM_START_ADDR(id, index) \
+	(PMEM_OFFSET(index) + pmem[id].base)
+#define PMEM_BUDDY_LEN(id, index) \
+	((1 << PMEM_BUDDY_ORDER(id, index)) * pmem[id].quantum)
+#define PMEM_END_ADDR(id, index) \
+	(PMEM_START_ADDR(id, index) + PMEM_LEN(id, index))
+#define PMEM_START_VADDR(id, index) \
+	(PMEM_OFFSET(index) + pmem[id].vbase)
+#define PMEM_END_VADDR(id, index) \
+	(PMEM_START_VADDR(id, index) + PMEM_LEN(id, index))
+#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
+#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+#define PMEM_IS_SUBMAP(data) \
+	((data->flags & PMEM_FLAGS_SUBMAP) && \
+	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
+
+static int pmem_release(struct inode *, struct file *);
+static int pmem_mmap(struct file *, struct vm_area_struct *);
+static int pmem_open(struct inode *, struct file *);
+static long pmem_ioctl(struct file *, unsigned int, unsigned long);
+
+struct file_operations pmem_fops = {
+	.release = pmem_release,
+	.mmap = pmem_mmap,
+	.open = pmem_open,
+	.unlocked_ioctl = pmem_ioctl,
+};
+
+#define PMEM_ATTR(_name, _mode, _show, _store) {            \
+	.attr = {.name = __stringify(_name), .mode = _mode }, \
+	.show = _show,                                        \
+	.store = _store,                                      \
+}
+
+struct pmem_attr {
+	struct attribute attr;
+	ssize_t(*show) (const int id, char * const);
+	ssize_t(*store) (const int id, const char * const, const size_t count);
+};
+#define to_pmem_attr(a) container_of(a, struct pmem_attr, attr)
+
+#define RW_PMEM_ATTR(name)  \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IRUGO | S_IWUSR, show_pmem_## name, store_pmem_## name)
+
+#define RO_PMEM_ATTR(name)  \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IRUGO, show_pmem_## name, NULL)
+
+#define WO_PMEM_ATTR(name)  \
+static struct pmem_attr pmem_attr_## name = \
+ PMEM_ATTR(name, S_IWUSR, NULL, store_pmem_## name)
+
+static ssize_t show_pmem(struct kobject *kobj,
+			struct attribute *attr,
+			char *buf)
+{
+	struct pmem_attr *a = to_pmem_attr(attr);
+	return a->show ? a->show(to_pmem_info_id(kobj), buf) : -EIO;
+}
+
+static ssize_t store_pmem(struct kobject *kobj, struct attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct pmem_attr *a = to_pmem_attr(attr);
+	return a->store ? a->store(to_pmem_info_id(kobj), buf, count) : -EIO;
+}
+
+static struct sysfs_ops pmem_ops = {
+	.show = show_pmem,
+	.store = store_pmem,
+};
+
+static ssize_t show_pmem_base(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
+		pmem[id].base, pmem[id].base);
+}
+RO_PMEM_ATTR(base);
+
+static ssize_t show_pmem_size(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu(%#lx)\n",
+		pmem[id].size, pmem[id].size);
+}
+RO_PMEM_ATTR(size);
+
+static ssize_t show_pmem_allocator_type(int id, char *buf)
+{
+	switch (pmem[id].allocator_type) {
+	case  PMEM_ALLOCATORTYPE_ALLORNOTHING:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "All or Nothing");
+	case  PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "Buddy Bestfit");
+	case  PMEM_ALLOCATORTYPE_BITMAP:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "Bitmap");
+	case PMEM_ALLOCATORTYPE_SYSTEM:
+		return scnprintf(buf, PAGE_SIZE, "%s\n", "System heap");
+	default:
+		return scnprintf(buf, PAGE_SIZE,
+			"??? Invalid allocator type (%d) for this region! "
+			"Something isn't right.\n",
+			pmem[id].allocator_type);
+	}
+}
+RO_PMEM_ATTR(allocator_type);
+
+static ssize_t show_pmem_mapped_regions(int id, char *buf)
+{
+	struct list_head *elt;
+	int ret;
+
+	ret = scnprintf(buf, PAGE_SIZE,
+		      "pid #: mapped regions (offset, len) (offset,len)...\n");
+
+	mutex_lock(&pmem[id].data_list_mutex);
+	list_for_each(elt, &pmem[id].data_list) {
+		struct pmem_data *data =
+			list_entry(elt, struct pmem_data, list);
+		struct list_head *elt2;
+
+		down_read(&data->sem);
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "pid %u:",
+				data->pid);
+		list_for_each(elt2, &data->region_list) {
+			struct pmem_region_node *region_node = list_entry(elt2,
+					struct pmem_region_node,
+					list);
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+					"(%lx,%lx) ",
+					region_node->region.offset,
+					region_node->region.len);
+		}
+		up_read(&data->sem);
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
+	}
+	mutex_unlock(&pmem[id].data_list_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(mapped_regions);
+
+#define PMEM_COMMON_SYSFS_ATTRS \
+	&pmem_attr_base.attr, \
+	&pmem_attr_size.attr, \
+	&pmem_attr_allocator_type.attr, \
+	&pmem_attr_mapped_regions.attr
+
+
+static ssize_t show_pmem_allocated(int id, char *buf)
+{
+	ssize_t ret;
+
+	mutex_lock(&pmem[id].arena_mutex);
+	ret = scnprintf(buf, PAGE_SIZE, "%s\n",
+		pmem[id].allocator.all_or_nothing.allocated ?
+		"is allocated" : "is NOT allocated");
+	mutex_unlock(&pmem[id].arena_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(allocated);
+
+static struct attribute *pmem_allornothing_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	&pmem_attr_allocated.attr,
+
+	NULL
+};
+
+static struct kobj_type pmem_allornothing_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_allornothing_attrs,
+};
+
+static ssize_t show_pmem_total_entries(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%lu\n", pmem[id].num_entries);
+}
+RO_PMEM_ATTR(total_entries);
+
+static ssize_t show_pmem_quantum_size(int id, char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%u (%#x)\n",
+		pmem[id].quantum, pmem[id].quantum);
+}
+RO_PMEM_ATTR(quantum_size);
+
+static ssize_t show_pmem_buddy_bitmap_dump(int id, char *buf)
+{
+	int ret, i;
+
+	mutex_lock(&pmem[id].data_list_mutex);
+	ret = scnprintf(buf, PAGE_SIZE, "index\torder\tlength\tallocated\n");
+
+	for (i = 0; i < pmem[id].num_entries && (PAGE_SIZE - ret);
+			i = PMEM_BUDDY_NEXT_INDEX(id, i))
+		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%d\t%d\t%d\t%d\n",
+			i, PMEM_BUDDY_ORDER(id, i),
+			PMEM_BUDDY_LEN(id, i),
+			!PMEM_IS_FREE_BUDDY(id, i));
+
+	mutex_unlock(&pmem[id].data_list_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(buddy_bitmap_dump);
+
+#define PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS \
+	&pmem_attr_quantum_size.attr, \
+	&pmem_attr_total_entries.attr
+
+static struct attribute *pmem_buddy_bestfit_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,
+
+	&pmem_attr_buddy_bitmap_dump.attr,
+
+	NULL
+};
+
+static struct kobj_type pmem_buddy_bestfit_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_buddy_bestfit_attrs,
+};
+
+static ssize_t show_pmem_free_quanta(int id, char *buf)
+{
+	ssize_t ret;
+
+	mutex_lock(&pmem[id].arena_mutex);
+	ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+		pmem[id].allocator.bitmap.bitmap_free);
+	mutex_unlock(&pmem[id].arena_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(free_quanta);
+
+static ssize_t show_pmem_bits_allocated(int id, char *buf)
+{
+	ssize_t ret;
+	unsigned int i;
+
+	mutex_lock(&pmem[id].arena_mutex);
+
+	ret = scnprintf(buf, PAGE_SIZE,
+		"id: %d\nbitnum\tindex\tquanta allocated\n", id);
+
+	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
+		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1)
+			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
+				"%u\t%u\t%u\n",
+				i,
+				pmem[id].allocator.bitmap.bitm_alloc[i].bit,
+				pmem[id].allocator.bitmap.bitm_alloc[i].quanta
+				);
+
+	mutex_unlock(&pmem[id].arena_mutex);
+	return ret;
+}
+RO_PMEM_ATTR(bits_allocated);
+
+static struct attribute *pmem_bitmap_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	PMEM_BITMAP_BUDDY_BESTFIT_COMMON_SYSFS_ATTRS,
+
+	&pmem_attr_free_quanta.attr,
+	&pmem_attr_bits_allocated.attr,
+
+	NULL
+};
+
+static struct attribute *pmem_system_attrs[] = {
+	PMEM_COMMON_SYSFS_ATTRS,
+
+	NULL
+};
+
+static struct kobj_type pmem_bitmap_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_bitmap_attrs,
+};
+
+static struct kobj_type pmem_system_ktype = {
+	.sysfs_ops = &pmem_ops,
+	.default_attrs = pmem_system_attrs,
+};
+
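+/*
+ * Take a reference on the backing region (mapping it on demand if needed)
+ * before calling the per-region allocator, and drop the reference again if
+ * the allocation fails.
+ */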
+static int pmem_allocate_from_id(const int id, const unsigned long size,
+						const unsigned int align)
+{
+	int ret;
+	ret = pmem_get_region(id);
+
+	if (ret)
+		return -1;
+
+	ret = pmem[id].allocate(id, size, align);
+
+	if (ret < 0)
+		pmem_put_region(id);
+
+	return ret;
+}
+
+static int pmem_free_from_id(const int id, const int index)
+{
+	pmem_put_region(id);
+	return pmem[id].free(id, index);
+}
+
+static int pmem_get_region(int id)
+{
+	/* Must be called with arena mutex locked */
+	atomic_inc(&pmem[id].allocation_cnt);
+	if (!pmem[id].vbase) {
+		DLOG("PMEMDEBUG: mapping for %s", pmem[id].name);
+		if (pmem[id].mem_request) {
+			int ret = pmem[id].mem_request(pmem[id].region_data);
+			if (ret) {
+				atomic_dec(&pmem[id].allocation_cnt);
+				return 1;
+			}
+		}
+		ioremap_pmem(id);
+	}
+
+	if (pmem[id].vbase) {
+		return 0;
+	} else {
+		if (pmem[id].mem_release)
+			pmem[id].mem_release(pmem[id].region_data);
+		atomic_dec(&pmem[id].allocation_cnt);
+		return 1;
+	}
+}
+
+static void pmem_put_region(int id)
+{
+	/* Must be called with arena mutex locked */
+	if (atomic_dec_and_test(&pmem[id].allocation_cnt)) {
+		DLOG("PMEMDEBUG: unmapping for %s", pmem[id].name);
+		BUG_ON(!pmem[id].vbase);
+		if (pmem[id].map_on_demand) {
+			/* unmap_kernel_range() flushes the caches
+			 * and removes the page table entries
+			 */
+			unmap_kernel_range((unsigned long)pmem[id].vbase,
+				 pmem[id].size);
+			pmem[id].vbase = NULL;
+			if (pmem[id].mem_release) {
+				int ret = pmem[id].mem_release(
+						pmem[id].region_data);
+				WARN(ret, "mem_release failed");
+			}
+
+		}
+	}
+}
+
+static int get_id(struct file *file)
+{
+	return MINOR(file->f_dentry->d_inode->i_rdev);
+}
+
+static char *get_name(struct file *file)
+{
+	int id = get_id(file);
+	return pmem[id].name;
+}
+
+static int is_pmem_file(struct file *file)
+{
+	int id;
+
+	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
+		return 0;
+
+	id = get_id(file);
+	return (unlikely(id >= PMEM_MAX_DEVICES ||
+		file->f_dentry->d_inode->i_rdev !=
+		     MKDEV(MISC_MAJOR, pmem[id].dev.minor))) ? 0 : 1;
+}
+
+static int has_allocation(struct file *file)
+{
+	/* must be called with at least read lock held on
+	 * ((struct pmem_data *)(file->private_data))->sem which
+	 * means that file is guaranteed not to be NULL upon entry!!
+	 * check is_pmem_file first if not accessed via pmem_file_ops */
+	struct pmem_data *pdata = file->private_data;
+	return pdata && pdata->index != -1;
+}
+
+static int is_master_owner(struct file *file)
+{
+	struct file *master_file;
+	struct pmem_data *data = file->private_data;
+	int put_needed, ret = 0;
+
+	if (!has_allocation(file))
+		return 0;
+	if (PMEM_FLAGS_MASTERMAP & data->flags)
+		return 1;
+	master_file = fget_light(data->master_fd, &put_needed);
+	if (master_file && data->master_file == master_file)
+		ret = 1;
+	if (master_file)
+		fput_light(master_file, put_needed);
+	return ret;
+}
+
+static int pmem_free_all_or_nothing(int id, int index)
+{
+	/* caller should hold the lock on arena_mutex! */
+	DLOG("index %d\n", index);
+
+	pmem[id].allocator.all_or_nothing.allocated =  0;
+	return 0;
+}
+
+static int pmem_free_space_all_or_nothing(int id,
+		struct pmem_freespace *fs)
+{
+	/* caller should hold the lock on arena_mutex! */
+	fs->total = (unsigned long)
+		pmem[id].allocator.all_or_nothing.allocated == 0 ?
+		pmem[id].size : 0;
+
+	fs->largest = fs->total;
+	return 0;
+}
+
+
+static int pmem_free_buddy_bestfit(int id, int index)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int curr = index;
+	DLOG("index %d\n", index);
+
+
+	/* clean up the bitmap, merging any buddies */
+	pmem[id].allocator.buddy_bestfit.buddy_bitmap[curr].allocated = 0;
+	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
+	 * if the buddy is also free merge them
+	 * repeat until the buddy is not free or end of the bitmap is reached
+	 */
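+	/* e.g. index 4 at order 2 has buddy 4 ^ (1 << 2) = 0; if index 0 is
+	 * also a free order-2 block, both become order 3 and the merged
+	 * block starts at index 0 */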
+	do {
+		int buddy = PMEM_BUDDY_INDEX(id, curr);
+		if (buddy < pmem[id].num_entries &&
+		    PMEM_IS_FREE_BUDDY(id, buddy) &&
+		    PMEM_BUDDY_ORDER(id, buddy) ==
+				PMEM_BUDDY_ORDER(id, curr)) {
+			PMEM_BUDDY_ORDER(id, buddy)++;
+			PMEM_BUDDY_ORDER(id, curr)++;
+			curr = min(buddy, curr);
+		} else {
+			break;
+		}
+	} while (curr < pmem[id].num_entries);
+
+	return 0;
+}
+
+
+static int pmem_free_space_buddy_bestfit(int id,
+		struct pmem_freespace *fs)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int curr;
+	unsigned long size;
+	fs->total = 0;
+	fs->largest = 0;
+
+	for (curr = 0; curr < pmem[id].num_entries;
+	     curr = PMEM_BUDDY_NEXT_INDEX(id, curr)) {
+		if (PMEM_IS_FREE_BUDDY(id, curr)) {
+			size = PMEM_BUDDY_LEN(id, curr);
+			if (size > fs->largest)
+				fs->largest = size;
+			fs->total += size;
+		}
+	}
+	return 0;
+}
+
+
+static inline uint32_t start_mask(int bit_start)
+{
+	return (uint32_t)(~0) << (bit_start & PMEM_BITS_PER_WORD_MASK);
+}
+
+static inline uint32_t end_mask(int bit_end)
+{
+	return (uint32_t)(~0) >>
+		((BITS_PER_LONG - bit_end) & PMEM_BITS_PER_WORD_MASK);
+}
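+
+/*
+ * Assuming 32-bit longs (as on these targets), start_mask(3) == 0xFFFFFFF8
+ * (bits 3..31 of the first word) and end_mask(35) == 0x00000007 (bits
+ * 32..34, i.e. the low three bits of the last word).
+ */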
+
+static inline int compute_total_words(int bit_end, int word_index)
+{
+	return ((bit_end + BITS_PER_LONG - 1) >>
+			PMEM_32BIT_WORD_ORDER) - word_index;
+}
+
+static void bitmap_bits_clear_all(uint32_t *bitp, int bit_start, int bit_end)
+{
+	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;
+
+	total_words = compute_total_words(bit_end, word_index);
+	if (total_words > 0) {
+		if (total_words == 1) {
+			bitp[word_index] &=
+				~(start_mask(bit_start) & end_mask(bit_end));
+		} else {
+			bitp[word_index++] &= ~start_mask(bit_start);
+			if (total_words > 2) {
+				int total_bytes;
+
+				total_words -= 2;
+				total_bytes = total_words << 2;
+
+				memset(&bitp[word_index], 0, total_bytes);
+				word_index += total_words;
+			}
+			bitp[word_index] &= ~end_mask(bit_end);
+		}
+	}
+}
+
+static int pmem_free_bitmap(int id, int bitnum)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int i;
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+
+	DLOG("bitnum %d\n", bitnum);
+
+	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) {
+		const int curr_bit =
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit;
+
+		if (curr_bit == bitnum) {
+			const int curr_quanta =
+				pmem[id].allocator.bitmap.bitm_alloc[i].quanta;
+
+			bitmap_bits_clear_all(pmem[id].allocator.bitmap.bitmap,
+				curr_bit, curr_bit + curr_quanta);
+			pmem[id].allocator.bitmap.bitmap_free += curr_quanta;
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
+			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
+			return 0;
+		}
+	}
+	printk(KERN_ALERT "pmem: %s: Attempt to free unallocated index %d, id"
+		" %d, pid %d(%s)\n", __func__, bitnum, id,  current->pid,
+		get_task_comm(currtask_name, current));
+
+	return -1;
+}
+
+static int pmem_free_system(int id, int index)
+{
+	/* caller should hold the lock on arena_mutex! */
+	struct alloc_list *item;
+
+	DLOG("index %d\n", index);
+	if (index != 0)
+		item = (struct alloc_list *)index;
+	else
+		return 0;
+
+	if (item->vaddr != NULL) {
+		iounmap(item->vaddr);
+		kfree(__va(item->addr));
+		list_del(&item->allocs);
+		kfree(item);
+	}
+
+	return 0;
+}
+
+static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs)
+{
+	int i, j;
+	int max_allocs = pmem[id].allocator.bitmap.bitmap_allocs;
+	int alloc_start = 0;
+	int next_alloc;
+	unsigned long size = 0;
+
+	fs->total = 0;
+	fs->largest = 0;
+
+	for (i = 0; i < max_allocs; i++) {
+
+		int alloc_quanta = 0;
+		int alloc_idx = 0;
+		next_alloc = pmem[id].num_entries;
+
+		/* Look for the lowest bit where next allocation starts */
+		for (j = 0; j < max_allocs; j++) {
+			const int curr_alloc = pmem[id].allocator.
+						bitmap.bitm_alloc[j].bit;
+			if (curr_alloc != -1) {
+				if (alloc_start == curr_alloc)
+					alloc_idx = j;
+				if (alloc_start >= curr_alloc)
+					continue;
+				if (curr_alloc < next_alloc)
+					next_alloc = curr_alloc;
+			}
+		}
+		alloc_quanta = pmem[id].allocator.bitmap.
+				bitm_alloc[alloc_idx].quanta;
+		size = (next_alloc - (alloc_start + alloc_quanta)) *
+				pmem[id].quantum;
+
+		if (size > fs->largest)
+			fs->largest = size;
+		fs->total += size;
+
+		if (next_alloc == pmem[id].num_entries)
+			break;
+		else
+			alloc_start = next_alloc;
+	}
+
+	return 0;
+}
+
+static int pmem_free_space_system(int id, struct pmem_freespace *fs)
+{
+	fs->total = pmem[id].size;
+	fs->largest = pmem[id].size;
+
+	return 0;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data);
+
+static int pmem_release(struct inode *inode, struct file *file)
+{
+	struct pmem_data *data = file->private_data;
+	struct pmem_region_node *region_node;
+	struct list_head *elt, *elt2;
+	int id = get_id(file), ret = 0;
+
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("releasing memory pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file), get_name(file), id);
+	mutex_lock(&pmem[id].data_list_mutex);
+	/* if this file is a master, revoke all the memory in the connected
+	 *  files */
+	if (PMEM_FLAGS_MASTERMAP & data->flags) {
+		list_for_each(elt, &pmem[id].data_list) {
+			struct pmem_data *sub_data =
+				list_entry(elt, struct pmem_data, list);
+			int is_master;
+
+			down_read(&sub_data->sem);
+			is_master = (PMEM_IS_SUBMAP(sub_data) &&
+				file == sub_data->master_file);
+			up_read(&sub_data->sem);
+
+			if (is_master)
+				pmem_revoke(file, sub_data);
+		}
+	}
+	list_del(&data->list);
+	mutex_unlock(&pmem[id].data_list_mutex);
+
+	down_write(&data->sem);
+
+	/* if it is not a connected file and it has an allocation, free it */
+	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
+		mutex_lock(&pmem[id].arena_mutex);
+		ret = pmem_free_from_id(id, data->index);
+		mutex_unlock(&pmem[id].arena_mutex);
+	}
+
+	/* if this file is a submap (mapped, connected file), downref the
+	 * task struct */
+	if (PMEM_FLAGS_SUBMAP & data->flags)
+		if (data->task) {
+			put_task_struct(data->task);
+			data->task = NULL;
+		}
+
+	file->private_data = NULL;
+
+	list_for_each_safe(elt, elt2, &data->region_list) {
+		region_node = list_entry(elt, struct pmem_region_node, list);
+		list_del(elt);
+		kfree(region_node);
+	}
+	BUG_ON(!list_empty(&data->region_list));
+
+	up_write(&data->sem);
+	kfree(data);
+	if (pmem[id].release)
+		ret = pmem[id].release(inode, file);
+
+	return ret;
+}
+
+static int pmem_open(struct inode *inode, struct file *file)
+{
+	struct pmem_data *data;
+	int id = get_id(file);
+	int ret = 0;
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+
+	DLOG("pid %u(%s) file %p(%ld) dev %s(id: %d)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file), get_name(file), id);
+	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
+	if (!data) {
+		printk(KERN_ALERT "pmem: %s: unable to allocate memory for "
+				"pmem metadata.\n", __func__);
+		return -1;
+	}
+	data->flags = 0;
+	data->index = -1;
+	data->task = NULL;
+	data->vma = NULL;
+	data->pid = 0;
+	data->master_file = NULL;
+#if PMEM_DEBUG
+	data->ref = 0;
+#endif
+	INIT_LIST_HEAD(&data->region_list);
+	init_rwsem(&data->sem);
+
+	file->private_data = data;
+	INIT_LIST_HEAD(&data->list);
+
+	mutex_lock(&pmem[id].data_list_mutex);
+	list_add(&data->list, &pmem[id].data_list);
+	mutex_unlock(&pmem[id].data_list_mutex);
+	return ret;
+}
+
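+/*
+ * pmem_order - smallest buddy order whose block covers len bytes;
+ * e.g. with a 4K quantum a 9000-byte request needs 3 quanta and yields
+ * order 2, i.e. a 4-quantum (16K) block.
+ */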
+static unsigned long pmem_order(unsigned long len, int id)
+{
+	int i;
+
+	len = (len + pmem[id].quantum - 1)/pmem[id].quantum;
+	len--;
+	for (i = 0; i < sizeof(len)*8; i++)
+		if (len >> i == 0)
+			break;
+	return i;
+}
+
+static int pmem_allocator_all_or_nothing(const int id,
+		const unsigned long len,
+		const unsigned int align)
+{
+	/* caller should hold the lock on arena_mutex! */
+	DLOG("all or nothing\n");
+	if ((len > pmem[id].size) ||
+		pmem[id].allocator.all_or_nothing.allocated)
+		return -1;
+	pmem[id].allocator.all_or_nothing.allocated = 1;
+	return len;
+}
+
+static int pmem_allocator_buddy_bestfit(const int id,
+		const unsigned long len,
+		unsigned int align)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int curr;
+	int best_fit = -1;
+	unsigned long order;
+
+	DLOG("buddy bestfit\n");
+	order = pmem_order(len, id);
+	if (order > PMEM_MAX_ORDER)
+		goto out;
+
+	DLOG("order %lx\n", order);
+
+	/* Look through the bitmap.
+	 * 	If a free slot of the correct order is found, use it.
+	 * 	Otherwise, use the best fit (smallest with size > order) slot.
+	 */
+	for (curr = 0;
+	     curr < pmem[id].num_entries;
+	     curr = PMEM_BUDDY_NEXT_INDEX(id, curr))
+		if (PMEM_IS_FREE_BUDDY(id, curr)) {
+			if (PMEM_BUDDY_ORDER(id, curr) ==
+					(unsigned char)order) {
+				/* set the not free bit and clear others */
+				best_fit = curr;
+				break;
+			}
+			if (PMEM_BUDDY_ORDER(id, curr) >
+					(unsigned char)order &&
+			    (best_fit < 0 ||
+			     PMEM_BUDDY_ORDER(id, curr) <
+					PMEM_BUDDY_ORDER(id, best_fit)))
+				best_fit = curr;
+		}
+
+	/* if best_fit < 0, there are no suitable slots; return an error */
+	if (best_fit < 0) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: %s: no space left to allocate!\n",
+			__func__);
+#endif
+		goto out;
+	}
+
+	/* now partition the best fit:
+	 * 	split the slot into 2 buddies of order - 1
+	 * 	repeat until the slot is of the correct order
+	 */
+	while (PMEM_BUDDY_ORDER(id, best_fit) > (unsigned char)order) {
+		int buddy;
+		PMEM_BUDDY_ORDER(id, best_fit) -= 1;
+		buddy = PMEM_BUDDY_INDEX(id, best_fit);
+		PMEM_BUDDY_ORDER(id, buddy) = PMEM_BUDDY_ORDER(id, best_fit);
+	}
+	pmem[id].allocator.buddy_bestfit.buddy_bitmap[best_fit].allocated = 1;
+out:
+	return best_fit;
+}
+
+
+static inline unsigned long paddr_from_bit(const int id, const int bitnum)
+{
+	return pmem[id].base + pmem[id].quantum * bitnum;
+}
+
+static inline unsigned long bit_from_paddr(const int id,
+		const unsigned long paddr)
+{
+	return (paddr - pmem[id].base) / pmem[id].quantum;
+}
+
+static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end)
+{
+	int word_index = bit_start >> PMEM_32BIT_WORD_ORDER, total_words;
+
+	total_words = compute_total_words(bit_end, word_index);
+	if (total_words > 0) {
+		if (total_words == 1) {
+			bitp[word_index] |=
+				(start_mask(bit_start) & end_mask(bit_end));
+		} else {
+			bitp[word_index++] |= start_mask(bit_start);
+			if (total_words > 2) {
+				int total_bytes;
+
+				total_words -= 2;
+				total_bytes = total_words << 2;
+
+				memset(&bitp[word_index], ~0, total_bytes);
+				word_index += total_words;
+			}
+			bitp[word_index] |= end_mask(bit_end);
+		}
+	}
+}
+
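+/*
+ * Scan the allocation bitmap for num_bits_to_alloc contiguous clear bits.
+ * Candidate start bits are kept aligned to 'spacing' quanta (relative to
+ * start_bit); fls() spots the last set bit in each probed word so the next
+ * probe resumes just past it.  On success the run is marked allocated and
+ * its first bit returned; -1 means no suitably aligned run is free.
+ */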
+static int
+bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc,
+		int total_bits, int spacing, int start_bit)
+{
+	int bit_start, last_bit, word_index;
+
+	if (num_bits_to_alloc <= 0)
+		return -1;
+
+	for (bit_start = start_bit; ;
+		bit_start = ((last_bit +
+			(word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1)
+			& ~(spacing - 1)) + start_bit) {
+		int bit_end = bit_start + num_bits_to_alloc, total_words;
+
+		if (bit_end > total_bits)
+			return -1; /* out of contiguous memory */
+
+		word_index = bit_start >> PMEM_32BIT_WORD_ORDER;
+		total_words = compute_total_words(bit_end, word_index);
+
+		if (total_words <= 0)
+			return -1;
+
+		if (total_words == 1) {
+			last_bit = fls(bitp[word_index] &
+					(start_mask(bit_start) &
+						end_mask(bit_end)));
+			if (last_bit)
+				continue;
+		} else {
+			int end_word = word_index + (total_words - 1);
+			last_bit =
+				fls(bitp[word_index] & start_mask(bit_start));
+			if (last_bit)
+				continue;
+
+			for (word_index++;
+					word_index < end_word;
+					word_index++) {
+				last_bit = fls(bitp[word_index]);
+				if (last_bit)
+					break;
+			}
+			if (last_bit)
+				continue;
+
+			last_bit = fls(bitp[word_index] & end_mask(bit_end));
+			if (last_bit)
+				continue;
+		}
+		bitmap_bits_set_all(bitp, bit_start, bit_end);
+		return bit_start;
+	}
+	return -1;
+}
+
+static int reserve_quanta(const unsigned int quanta_needed,
+		const int id,
+		unsigned int align)
+{
+	/* alignment should be a valid power of 2 */
+	int ret = -1, start_bit = 0, spacing = 1;
+
+	/* Sanity check */
+	if (quanta_needed > pmem[id].allocator.bitmap.bitmap_free) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: %s: request (%d) too big for"
+			" available free (%d)\n", __func__, quanta_needed,
+			pmem[id].allocator.bitmap.bitmap_free);
+#endif
+		return -1;
+	}
+
+	start_bit = bit_from_paddr(id,
+		(pmem[id].base + align - 1) & ~(align - 1));
+	if (start_bit <= -1) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT
+			"pmem: %s: bit_from_paddr fails for"
+			" %u alignment.\n", __func__, align);
+#endif
+		return -1;
+	}
+	spacing = align / pmem[id].quantum;
+	spacing = spacing > 1 ? spacing : 1;
+
+	ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap,
+		quanta_needed,
+		(pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum,
+		spacing,
+		start_bit);
+
+#if PMEM_DEBUG
+	if (ret < 0)
+		printk(KERN_ALERT "pmem: %s: not enough contiguous bits free "
+			"in bitmap! Region memory is either too fragmented or"
+			" request is too large for available memory.\n",
+			__func__);
+#endif
+
+	return ret;
+}
+
+static int pmem_allocator_bitmap(const int id,
+		const unsigned long len,
+		const unsigned int align)
+{
+	/* caller should hold the lock on arena_mutex! */
+	int bitnum, i;
+	unsigned int quanta_needed;
+
+	DLOG("bitmap id %d, len %ld, align %u\n", id, len, align);
+	if (!pmem[id].allocator.bitmap.bitm_alloc) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: bitm_alloc not present! id: %d\n",
+			id);
+#endif
+		return -1;
+	}
+
+	quanta_needed = (len + pmem[id].quantum - 1) / pmem[id].quantum;
+	DLOG("quantum size %u quanta needed %u free %u id %d\n",
+		pmem[id].quantum, quanta_needed,
+		pmem[id].allocator.bitmap.bitmap_free, id);
+
+	if (pmem[id].allocator.bitmap.bitmap_free < quanta_needed) {
+#if PMEM_DEBUG
+		printk(KERN_ALERT "pmem: memory allocation failure. "
+			"PMEM memory region exhausted, id %d."
+			" Unable to comply with allocation request.\n", id);
+#endif
+		return -1;
+	}
+
+	bitnum = reserve_quanta(quanta_needed, id, align);
+	if (bitnum == -1)
+		goto leave;
+
+	for (i = 0;
+		i < pmem[id].allocator.bitmap.bitmap_allocs &&
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1;
+		i++)
+		;
+
+	if (i >= pmem[id].allocator.bitmap.bitmap_allocs) {
+		void *temp;
+		int32_t new_bitmap_allocs =
+			pmem[id].allocator.bitmap.bitmap_allocs << 1;
+		int j;
+
+		if (!new_bitmap_allocs) { /* failed sanity check!! */
+#if PMEM_DEBUG
+			pr_alert("pmem: bitmap_allocs number"
+				" wrapped around to zero! Something "
+				"is VERY wrong.\n");
+#endif
+			return -1;
+		}
+
+		if (new_bitmap_allocs > pmem[id].num_entries) {
+			/* failed sanity check!! */
+#if PMEM_DEBUG
+			pr_alert("pmem: required bitmap_allocs"
+				" number exceeds maximum entries possible"
+				" for current quanta\n");
+#endif
+			return -1;
+		}
+
+		temp = krealloc(pmem[id].allocator.bitmap.bitm_alloc,
+				new_bitmap_allocs *
+				sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
+				GFP_KERNEL);
+		if (!temp) {
+#if PMEM_DEBUG
+			pr_alert("pmem: can't realloc bitmap_allocs, "
+				"id %d, current num bitmap allocs %d\n",
+				id, pmem[id].allocator.bitmap.bitmap_allocs);
+#endif
+			return -1;
+		}
+		pmem[id].allocator.bitmap.bitmap_allocs = new_bitmap_allocs;
+		pmem[id].allocator.bitmap.bitm_alloc = temp;
+
+		for (j = i; j < new_bitmap_allocs; j++) {
+			pmem[id].allocator.bitmap.bitm_alloc[j].bit = -1;
+			pmem[id].allocator.bitmap.bitm_alloc[j].quanta = 0;
+		}
+
+		DLOG("increased # of allocated regions to %d for id %d\n",
+			pmem[id].allocator.bitmap.bitmap_allocs, id);
+	}
+
+	DLOG("bitnum %d, bitm_alloc index %d\n", bitnum, i);
+
+	pmem[id].allocator.bitmap.bitmap_free -= quanta_needed;
+	pmem[id].allocator.bitmap.bitm_alloc[i].bit = bitnum;
+	pmem[id].allocator.bitmap.bitm_alloc[i].quanta = quanta_needed;
+leave:
+	return bitnum;
+}
+
+static int pmem_allocator_system(const int id,
+		const unsigned long len,
+		const unsigned int align)
+{
+	/* caller should hold the lock on arena_mutex! */
+	struct alloc_list *list;
+	unsigned long aligned_len;
+	int count = SYSTEM_ALLOC_RETRY;
+	void *buf;
+
+	DLOG("system id %d, len %ld, align %u\n", id, len, align);
+
+	if ((pmem[id].allocator.system_mem.used + len) > pmem[id].size) {
+		DLOG("requested size would be larger than quota\n");
+		return -1;
+	}
+
+	/* Over-allocate by 'align' bytes so the aligned address (aaddr) can
+	 * be placed on the requested boundary within the buffer. */
+	aligned_len = len + align;
+
+	/* Attempt allocation */
+	list = kmalloc(sizeof(struct alloc_list), GFP_KERNEL);
+	if (list == NULL) {
+		printk(KERN_ERR "pmem: failed to allocate system metadata\n");
+		return -1;
+	}
+	list->vaddr = NULL;
+
+	buf = NULL;
+	while ((buf == NULL) && count--) {
+		buf = kmalloc((aligned_len), GFP_KERNEL);
+		if (buf == NULL) {
+			DLOG("pmem: kmalloc %d temporarily failed len= %ld\n",
+				count, aligned_len);
+		}
+	}
+	if (!buf) {
+		printk(KERN_CRIT "pmem: kmalloc failed for id= %d len= %ld\n",
+			id, aligned_len);
+		kfree(list);
+		return -1;
+	}
+	list->size = aligned_len;
+	list->addr = (void *)__pa(buf);
+	list->aaddr = (void *)(((unsigned int)(list->addr) + (align - 1)) &
+			~(align - 1));
+
+	if (!pmem[id].cached)
+		list->vaddr = ioremap(__pa(buf), aligned_len);
+	else
+		list->vaddr = ioremap_cached(__pa(buf), aligned_len);
+
+	INIT_LIST_HEAD(&list->allocs);
+	list_add(&list->allocs, &pmem[id].allocator.system_mem.alist);
+
+	return (int)list;
+}
+
+static pgprot_t pmem_phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
+{
+	int id = get_id(file);
+#ifdef pgprot_writecombine
+	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
+		/* on ARMv6 and ARMv7 this expands to Normal Noncached */
+		return pgprot_writecombine(vma_prot);
+#endif
+#ifdef pgprot_ext_buffered
+	else if (pmem[id].buffered)
+		return pgprot_ext_buffered(vma_prot);
+#endif
+	return vma_prot;
+}
+
+static unsigned long pmem_start_addr_all_or_nothing(int id,
+		struct pmem_data *data)
+{
+	return PMEM_START_ADDR(id, 0);
+}
+
+static unsigned long pmem_start_addr_buddy_bestfit(int id,
+		struct pmem_data *data)
+{
+	return PMEM_START_ADDR(id, data->index);
+}
+
+static unsigned long pmem_start_addr_bitmap(int id, struct pmem_data *data)
+{
+	return data->index * pmem[id].quantum + pmem[id].base;
+}
+
+static unsigned long pmem_start_addr_system(int id, struct pmem_data *data)
+{
+	return (unsigned long)(((struct alloc_list *)(data->index))->aaddr);
+}
+
+static void *pmem_start_vaddr(int id, struct pmem_data *data)
+{
+	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM)
+		return ((struct alloc_list *)(data->index))->vaddr;
+	else
+		return pmem[id].start_addr(id, data) - pmem[id].base +
+			pmem[id].vbase;
+}
+
+static unsigned long pmem_len_all_or_nothing(int id, struct pmem_data *data)
+{
+	return data->index;
+}
+
+static unsigned long pmem_len_buddy_bestfit(int id, struct pmem_data *data)
+{
+	return PMEM_BUDDY_LEN(id, data->index);
+}
+
+static unsigned long pmem_len_bitmap(int id, struct pmem_data *data)
+{
+	int i;
+	unsigned long ret = 0;
+
+	mutex_lock(&pmem[id].arena_mutex);
+
+	for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++)
+		if (pmem[id].allocator.bitmap.bitm_alloc[i].bit ==
+				data->index) {
+			ret = pmem[id].allocator.bitmap.bitm_alloc[i].quanta *
+				pmem[id].quantum;
+			break;
+		}
+
+	mutex_unlock(&pmem[id].arena_mutex);
+#if PMEM_DEBUG
+	if (i >= pmem[id].allocator.bitmap.bitmap_allocs)
+		pr_alert("pmem: %s: can't find bitnum %d in "
+			"alloc'd array!\n", __func__, data->index);
+#endif
+	return ret;
+}
+
+static unsigned long pmem_len_system(int id, struct pmem_data *data)
+{
+	unsigned long ret = 0;
+
+	mutex_lock(&pmem[id].arena_mutex);
+
+	ret = ((struct alloc_list *)data->index)->size;
+	mutex_unlock(&pmem[id].arena_mutex);
+
+	return ret;
+}
+
+static int pmem_map_garbage(int id, struct vm_area_struct *vma,
+			    struct pmem_data *data, unsigned long offset,
+			    unsigned long len)
+{
+	int i, garbage_pages = len >> PAGE_SHIFT;
+
+	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
+	for (i = 0; i < garbage_pages; i++) {
+		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
+		    pmem[id].garbage_pfn))
+			return -EAGAIN;
+	}
+	return 0;
+}
+
+static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
+				struct pmem_data *data, unsigned long offset,
+				unsigned long len)
+{
+	int garbage_pages;
+	DLOG("unmap offset %lx len %lx\n", offset, len);
+
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+
+	garbage_pages = len >> PAGE_SHIFT;
+	zap_page_range(vma, vma->vm_start + offset, len, NULL);
+	pmem_map_garbage(id, vma, data, offset, len);
+	return 0;
+}
+
+static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
+			      struct pmem_data *data, unsigned long offset,
+			      unsigned long len)
+{
+	int ret;
+	DLOG("map offset %lx len %lx\n", offset, len);
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
+	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));
+
+	ret = io_remap_pfn_range(vma, vma->vm_start + offset,
+		(pmem[id].start_addr(id, data) + offset) >> PAGE_SHIFT,
+		len, vma->vm_page_prot);
+	if (ret) {
+#if PMEM_DEBUG
+		pr_alert("pmem: %s: io_remap_pfn_range fails with "
+			"return value: %d!\n",	__func__, ret);
+#endif
+
+		ret = -EAGAIN;
+	}
+	return ret;
+}
+
+static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
+			      struct pmem_data *data, unsigned long offset,
+			      unsigned long len)
+{
+	/* hold the mm sem for the vma you are modifying when you call this */
+	BUG_ON(!vma);
+	zap_page_range(vma, vma->vm_start + offset, len, NULL);
+	return pmem_map_pfn_range(id, vma, data, offset, len);
+}
+
+static void pmem_vma_open(struct vm_area_struct *vma)
+{
+	struct file *file = vma->vm_file;
+	struct pmem_data *data = file->private_data;
+	int id = get_id(file);
+
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
+		get_name(file), id, current->pid,
+		get_task_comm(currtask_name, current),
+		current->parent->pid, file, file_count(file));
+	/* this should never be called as we don't support copying pmem
+	 * ranges via fork */
+	down_read(&data->sem);
+	BUG_ON(!has_allocation(file));
+	/* remap the garbage pages, forkers don't get access to the data */
+	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
+	up_read(&data->sem);
+}
+
+static void pmem_vma_close(struct vm_area_struct *vma)
+{
+	struct file *file = vma->vm_file;
+	struct pmem_data *data = file->private_data;
+
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("Dev %s(id: %d) pid %u(%s) ppid %u file %p count %ld\n",
+		get_name(file), get_id(file), current->pid,
+		get_task_comm(currtask_name, current),
+		current->parent->pid, file, file_count(file));
+
+	if (unlikely(!is_pmem_file(file))) {
+		pr_warning("pmem: something is very wrong, you are "
+		       "closing a vm backing an allocation that doesn't "
+		       "exist!\n");
+		return;
+	}
+
+	down_write(&data->sem);
+	if (unlikely(!has_allocation(file))) {
+		up_write(&data->sem);
+		pr_warning("pmem: something is very wrong, you are "
+		       "closing a vm backing an allocation that doesn't "
+		       "exist!\n");
+		return;
+	}
+	if (data->vma == vma) {
+		data->vma = NULL;
+		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
+		    (data->flags & PMEM_FLAGS_SUBMAP))
+			data->flags |= PMEM_FLAGS_UNSUBMAP;
+	}
+	/* the kernel is going to free this vma now anyway */
+	up_write(&data->sem);
+}
+
+static struct vm_operations_struct vm_ops = {
+	.open = pmem_vma_open,
+	.close = pmem_vma_close,
+};
+
+static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct pmem_data *data = file->private_data;
+	int index = -1;
+	unsigned long vma_size =  vma->vm_end - vma->vm_start;
+	int ret = 0, id = get_id(file);
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+
+	if (!data) {
+		pr_err("pmem: Invalid file descriptor, no private data\n");
+		return -EINVAL;
+	}
+	DLOG("pid %u(%s) mmap vma_size %lu on dev %s(id: %d)\n", current->pid,
+		get_task_comm(currtask_name, current), vma_size,
+		get_name(file), id);
+	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
+#if PMEM_DEBUG
+		pr_err("pmem: mmaps must be at offset zero, aligned"
+				" and a multiple of the page size.\n");
+#endif
+		return -EINVAL;
+	}
+
+	down_write(&data->sem);
+	/* check this file isn't already mmaped, for submaps check this file
+	 * has never been mmaped */
+	if ((data->flags & PMEM_FLAGS_SUBMAP) ||
+	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
+#if PMEM_DEBUG
+		pr_err("pmem: you can only mmap a pmem file once, "
+		       "this file is already mmaped. %x\n", data->flags);
+#endif
+		ret = -EINVAL;
+		goto error;
+	}
+	/* if file->private_data is unallocated, allocate */
+	if (data->index == -1) {
+		mutex_lock(&pmem[id].arena_mutex);
+		index = pmem_allocate_from_id(id,
+				vma->vm_end - vma->vm_start,
+				SZ_4K);
+		mutex_unlock(&pmem[id].arena_mutex);
+		/* either no space was available or an error occurred */
+		if (index == -1) {
+			pr_err("pmem: mmap unable to allocate memory "
+				"on %s\n", get_name(file));
+			ret = -ENOMEM;
+			goto error;
+		}
+		/* store the index of a successful allocation */
+		data->index = index;
+	}
+
+	if (pmem[id].len(id, data) < vma_size) {
+#if PMEM_DEBUG
+		pr_err("pmem: mmap size [%lu] does not match"
+		       " size of backing region [%lu].\n", vma_size,
+		       pmem[id].len(id, data));
+#endif
+		ret = -EINVAL;
+		goto error;
+	}
+
+	vma->vm_pgoff = pmem[id].start_addr(id, data) >> PAGE_SHIFT;
+
+	vma->vm_page_prot = pmem_phys_mem_access_prot(file, vma->vm_page_prot);
+
+	if (data->flags & PMEM_FLAGS_CONNECTED) {
+		struct pmem_region_node *region_node;
+		struct list_head *elt;
+		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
+			pr_alert("pmem: mmap failed in kernel!\n");
+			ret = -EAGAIN;
+			goto error;
+		}
+		list_for_each(elt, &data->region_list) {
+			region_node = list_entry(elt, struct pmem_region_node,
+						 list);
+			DLOG("remapping file: %p %lx %lx\n", file,
+				region_node->region.offset,
+				region_node->region.len);
+			if (pmem_remap_pfn_range(id, vma, data,
+						 region_node->region.offset,
+						 region_node->region.len)) {
+				ret = -EAGAIN;
+				goto error;
+			}
+		}
+		data->flags |= PMEM_FLAGS_SUBMAP;
+		get_task_struct(current->group_leader);
+		data->task = current->group_leader;
+		data->vma = vma;
+#if PMEM_DEBUG
+		data->pid = current->pid;
+#endif
+		DLOG("submmapped file %p vma %p pid %u\n", file, vma,
+		     current->pid);
+	} else {
+		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
+			pr_err("pmem: mmap failed in kernel!\n");
+			ret = -EAGAIN;
+			goto error;
+		}
+		data->flags |= PMEM_FLAGS_MASTERMAP;
+		data->pid = current->pid;
+	}
+	vma->vm_ops = &vm_ops;
+error:
+	up_write(&data->sem);
+	return ret;
+}
+
+/* the following are the api for accessing pmem regions by other drivers
+ * from inside the kernel */
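+/*
+ * A minimal usage sketch (illustrative only; usr_fd and the surrounding
+ * driver are hypothetical, the pmem calls are the ones exported below):
+ *
+ *	unsigned long paddr, kvaddr, len;
+ *	struct file *pmem_filp;
+ *
+ *	if (get_pmem_file(usr_fd, &paddr, &kvaddr, &len, &pmem_filp))
+ *		return -EINVAL;
+ *	... program hardware with paddr/len or touch kvaddr ...
+ *	flush_pmem_file(pmem_filp, 0, len);
+ *	put_pmem_file(pmem_filp);
+ */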
+int get_pmem_user_addr(struct file *file, unsigned long *start,
+		   unsigned long *len)
+{
+	int ret = -1;
+
+	if (is_pmem_file(file)) {
+		struct pmem_data *data = file->private_data;
+
+		down_read(&data->sem);
+		if (has_allocation(file)) {
+			if (data->vma) {
+				*start = data->vma->vm_start;
+				*len = data->vma->vm_end - data->vma->vm_start;
+			} else {
+				*start = *len = 0;
+#if PMEM_DEBUG
+				pr_err("pmem: %s: no vma present.\n",
+					__func__);
+#endif
+			}
+			ret = 0;
+		}
+		up_read(&data->sem);
+	}
+
+#if PMEM_DEBUG
+	if (ret)
+		pr_err("pmem: %s: requested pmem data from invalid "
+			"file.\n", __func__);
+#endif
+	return ret;
+}
+
+int get_pmem_addr(struct file *file, unsigned long *start,
+		  unsigned long *vstart, unsigned long *len)
+{
+	int ret = -1;
+
+	if (is_pmem_file(file)) {
+		struct pmem_data *data = file->private_data;
+
+		down_read(&data->sem);
+		if (has_allocation(file)) {
+			int id = get_id(file);
+
+			*start = pmem[id].start_addr(id, data);
+			*len = pmem[id].len(id, data);
+			*vstart = (unsigned long)
+				pmem_start_vaddr(id, data);
+			up_read(&data->sem);
+#if PMEM_DEBUG
+			down_write(&data->sem);
+			data->ref++;
+			up_write(&data->sem);
+#endif
+			DLOG("returning start %#lx len %lu "
+				"vstart %#lx\n",
+				*start, *len, *vstart);
+			ret = 0;
+		} else {
+			up_read(&data->sem);
+		}
+	}
+	return ret;
+}
+
+int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
+		  unsigned long *len, struct file **filp)
+{
+	int ret = -1;
+	struct file *file = fget(fd);
+
+	if (unlikely(file == NULL)) {
+		pr_err("pmem: %s: requested data from file "
+			"descriptor that doesn't exist.\n", __func__);
+	} else {
+#if PMEM_DEBUG_MSGS
+		char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+		DLOG("filp %p rdev %d pid %u(%s) file %p(%ld)"
+			" dev %s(id: %d)\n", filp,
+			file->f_dentry->d_inode->i_rdev,
+			current->pid, get_task_comm(currtask_name, current),
+			file, file_count(file), get_name(file), get_id(file));
+
+		if (!get_pmem_addr(file, start, vstart, len)) {
+			if (filp)
+				*filp = file;
+			ret = 0;
+		} else {
+			fput(file);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(get_pmem_file);
+
+int get_pmem_fd(int fd, unsigned long *start, unsigned long *len)
+{
+	unsigned long vstart;
+	return get_pmem_file(fd, start, &vstart, len, NULL);
+}
+EXPORT_SYMBOL(get_pmem_fd);
+
+void put_pmem_file(struct file *file)
+{
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("rdev %d pid %u(%s) file %p(%ld)" " dev %s(id: %d)\n",
+		file->f_dentry->d_inode->i_rdev, current->pid,
+		get_task_comm(currtask_name, current), file,
+		file_count(file), get_name(file), get_id(file));
+	if (is_pmem_file(file)) {
+#if PMEM_DEBUG
+		struct pmem_data *data = file->private_data;
+
+		down_write(&data->sem);
+		if (!data->ref--) {
+			data->ref++;
+			pr_alert("pmem: pmem_put > pmem_get %s "
+				"(pid %d)\n",
+			       pmem[get_id(file)].dev.name, data->pid);
+			BUG();
+		}
+		up_write(&data->sem);
+#endif
+		fput(file);
+	}
+}
+EXPORT_SYMBOL(put_pmem_file);
+
+void put_pmem_fd(int fd)
+{
+	int put_needed;
+	struct file *file = fget_light(fd, &put_needed);
+
+	if (file) {
+		put_pmem_file(file);
+		fput_light(file, put_needed);
+	}
+}
+
+void flush_pmem_fd(int fd, unsigned long offset, unsigned long len)
+{
+	int fput_needed;
+	struct file *file = fget_light(fd, &fput_needed);
+
+	if (file) {
+		flush_pmem_file(file, offset, len);
+		fput_light(file, fput_needed);
+	}
+}
+
+void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
+{
+	struct pmem_data *data;
+	int id;
+	void *vaddr;
+	struct pmem_region_node *region_node;
+	struct list_head *elt;
+	void *flush_start, *flush_end;
+#ifdef CONFIG_OUTER_CACHE
+	unsigned long phy_start, phy_end;
+#endif
+	if (!is_pmem_file(file))
+		return;
+
+	id = get_id(file);
+	if (!pmem[id].cached)
+		return;
+
+	/* is_pmem_file fails if !file */
+	data = file->private_data;
+
+	down_read(&data->sem);
+	if (!has_allocation(file))
+		goto end;
+
+	vaddr = pmem_start_vaddr(id, data);
+
+	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) {
+		dmac_flush_range(vaddr,
+			(void *)((unsigned long)vaddr +
+				 ((struct alloc_list *)(data->index))->size));
+#ifdef CONFIG_OUTER_CACHE
+		phy_start = pmem_start_addr_system(id, data);
+
+		phy_end = phy_start +
+			((struct alloc_list *)(data->index))->size;
+
+		outer_flush_range(phy_start, phy_end);
+#endif
+		goto end;
+	}
+	/* if this isn't a submmapped file, flush the whole thing */
+	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
+		dmac_flush_range(vaddr, vaddr + pmem[id].len(id, data));
+#ifdef CONFIG_OUTER_CACHE
+		phy_start = (unsigned long)vaddr -
+				(unsigned long)pmem[id].vbase + pmem[id].base;
+
+		phy_end  =  phy_start + pmem[id].len(id, data);
+
+		outer_flush_range(phy_start, phy_end);
+#endif
+		goto end;
+	}
+	/* otherwise, flush the region of the file we are drawing */
+	list_for_each(elt, &data->region_list) {
+		region_node = list_entry(elt, struct pmem_region_node, list);
+		if ((offset >= region_node->region.offset) &&
+		    ((offset + len) <= (region_node->region.offset +
+			region_node->region.len))) {
+			flush_start = vaddr + region_node->region.offset;
+			flush_end = flush_start + region_node->region.len;
+			dmac_flush_range(flush_start, flush_end);
+#ifdef CONFIG_OUTER_CACHE
+
+			phy_start = (unsigned long)flush_start -
+				(unsigned long)pmem[id].vbase + pmem[id].base;
+
+			phy_end  =  phy_start + region_node->region.len;
+
+			outer_flush_range(phy_start, phy_end);
+#endif
+			break;
+		}
+	}
+end:
+	up_read(&data->sem);
+}
+
+int pmem_cache_maint(struct file *file, unsigned int cmd,
+		struct pmem_addr *pmem_addr)
+{
+	struct pmem_data *data;
+	int id;
+	unsigned long vaddr, paddr, length, offset,
+		      pmem_len, pmem_start_addr;
+
+	/* Called from kernel-space so file may be NULL */
+	if (!file)
+		return -EBADF;
+
+	/*
+	 * check that the vaddr passed for flushing is valid
+	 * so that you don't crash the kernel
+	 */
+	if (!pmem_addr->vaddr)
+		return -EINVAL;
+
+	data = file->private_data;
+	id = get_id(file);
+
+	if (!pmem[id].cached)
+		return 0;
+
+	offset = pmem_addr->offset;
+	length = pmem_addr->length;
+
+	down_read(&data->sem);
+	if (!has_allocation(file)) {
+		up_read(&data->sem);
+		return -EINVAL;
+	}
+	pmem_len = pmem[id].len(id, data);
+	pmem_start_addr = pmem[id].start_addr(id, data);
+	up_read(&data->sem);
+
+	if (offset + length > pmem_len)
+		return -EINVAL;
+
+	vaddr = pmem_addr->vaddr;
+	paddr = pmem_start_addr + offset;
+
+	DLOG("pmem cache maint on dev %s(id: %d)"
+		"(vaddr %lx paddr %lx len %lu bytes)\n",
+		get_name(file), id, vaddr, paddr, length);
+	if (cmd == PMEM_CLEAN_INV_CACHES)
+		clean_and_invalidate_caches(vaddr,
+				length, paddr);
+	else if (cmd == PMEM_CLEAN_CACHES)
+		clean_caches(vaddr, length, paddr);
+	else if (cmd == PMEM_INV_CACHES)
+		invalidate_caches(vaddr, length, paddr);
+
+	return 0;
+}
+EXPORT_SYMBOL(pmem_cache_maint);
+
+static int pmem_connect(unsigned long connect, struct file *file)
+{
+	int ret = 0, put_needed;
+	struct file *src_file;
+
+	if (!file) {
+		pr_err("pmem: %s: NULL file pointer passed in, "
+			"bailing out!\n", __func__);
+		ret = -EINVAL;
+		goto leave;
+	}
+
+	src_file = fget_light(connect, &put_needed);
+
+	if (!src_file) {
+		pr_err("pmem: %s: src file not found!\n", __func__);
+		ret = -EBADF;
+		goto leave;
+	}
+
+	if (src_file == file) { /* degenerate case, operator error */
+		pr_err("pmem: %s: src_file and passed in file are "
+			"the same; refusing to connect to self!\n", __func__);
+		ret = -EINVAL;
+		goto put_src_file;
+	}
+
+	if (unlikely(!is_pmem_file(src_file))) {
+		pr_err("pmem: %s: src file is not a pmem file!\n",
+			__func__);
+		ret = -EINVAL;
+		goto put_src_file;
+	} else {
+		struct pmem_data *src_data = src_file->private_data;
+
+		if (!src_data) {
+			pr_err("pmem: %s: src file pointer has no "
+				"private data, bailing out!\n", __func__);
+			ret = -EINVAL;
+			goto put_src_file;
+		}
+
+		down_read(&src_data->sem);
+
+		if (unlikely(!has_allocation(src_file))) {
+			up_read(&src_data->sem);
+			pr_err("pmem: %s: src file has no allocation!\n",
+				__func__);
+			ret = -EINVAL;
+		} else {
+			struct pmem_data *data;
+			int src_index = src_data->index;
+
+			up_read(&src_data->sem);
+
+			data = file->private_data;
+			if (!data) {
+				pr_err("pmem: %s: passed in file "
+					"pointer has no private data, bailing"
+					" out!\n", __func__);
+				ret = -EINVAL;
+				goto put_src_file;
+			}
+
+			down_write(&data->sem);
+			if (has_allocation(file) &&
+					(data->index != src_index)) {
+				up_write(&data->sem);
+
+				pr_err("pmem: %s: file is already "
+					"mapped but doesn't match this "
+					"src_file!\n", __func__);
+				ret = -EINVAL;
+			} else {
+				data->index = src_index;
+				data->flags |= PMEM_FLAGS_CONNECTED;
+				data->master_fd = connect;
+				data->master_file = src_file;
+
+				up_write(&data->sem);
+
+				DLOG("connect %p to %p\n", file, src_file);
+			}
+		}
+	}
+put_src_file:
+	fput_light(src_file, put_needed);
+leave:
+	return ret;
+}
+
+static void pmem_unlock_data_and_mm(struct pmem_data *data,
+				    struct mm_struct *mm)
+{
+	up_write(&data->sem);
+	if (mm != NULL) {
+		up_write(&mm->mmap_sem);
+		mmput(mm);
+	}
+}
+
+static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
+				 struct mm_struct **locked_mm)
+{
+	int ret = 0;
+	struct mm_struct *mm = NULL;
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+	DLOG("pid %u(%s) file %p(%ld)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file));
+
+	*locked_mm = NULL;
+lock_mm:
+	down_read(&data->sem);
+	if (PMEM_IS_SUBMAP(data)) {
+		mm = get_task_mm(data->task);
+		if (!mm) {
+			up_read(&data->sem);
+#if PMEM_DEBUG
+			pr_alert("pmem: can't remap - task is gone!\n");
+#endif
+			return -1;
+		}
+	}
+	up_read(&data->sem);
+
+	if (mm)
+		down_write(&mm->mmap_sem);
+
+	down_write(&data->sem);
+	/* check that the file didn't get mmaped before we could take the
+	 * data sem, this should be safe b/c you can only submap each file
+	 * once */
+	if (PMEM_IS_SUBMAP(data) && !mm) {
+		pmem_unlock_data_and_mm(data, mm);
+		DLOG("mapping contention, repeating mmap op\n");
+		goto lock_mm;
+	}
+	/* now check that vma.mm is still there, it could have been
+	 * deleted by vma_close before we could get the data->sem */
+	if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
+		/* might as well release this */
+		if (data->flags & PMEM_FLAGS_SUBMAP) {
+			put_task_struct(data->task);
+			data->task = NULL;
+			/* lower the submap flag to show the mm is gone */
+			data->flags &= ~(PMEM_FLAGS_SUBMAP);
+		}
+		pmem_unlock_data_and_mm(data, mm);
+#if PMEM_DEBUG
+		pr_alert("pmem: vma.mm went away!\n");
+#endif
+		return -1;
+	}
+	*locked_mm = mm;
+	return ret;
+}
+
+int pmem_remap(struct pmem_region *region, struct file *file,
+		      unsigned operation)
+{
+	int ret;
+	struct pmem_region_node *region_node;
+	struct mm_struct *mm = NULL;
+	struct list_head *elt, *elt2;
+	int id = get_id(file);
+	struct pmem_data *data;
+
+	DLOG("operation %#x, region offset %ld, region len %ld\n",
+		operation, region->offset, region->len);
+
+	if (!is_pmem_file(file)) {
+#if PMEM_DEBUG
+		pr_err("pmem: remap request for non-pmem file descriptor\n");
+#endif
+		return -EINVAL;
+	}
+
+	/* is_pmem_file fails if !file */
+	data = file->private_data;
+
+	/* pmem region must be aligned on a page boundary */
+	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
+		 !PMEM_IS_PAGE_ALIGNED(region->len))) {
+#if PMEM_DEBUG
+		pr_err("pmem: request for unaligned pmem "
+			"suballocation %lx %lx\n",
+			region->offset, region->len);
+#endif
+		return -EINVAL;
+	}
+
+	/* if userspace requests a region of len 0, there's nothing to do */
+	if (region->len == 0)
+		return 0;
+
+	/* lock the mm and data */
+	ret = pmem_lock_data_and_mm(file, data, &mm);
+	if (ret)
+		return 0;
+
+	/* only the owner of the master file can remap the client fds
+	 * that are backed by it */
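+	/* (The flow, roughly: a client fd is tied to a master pmem fd via the
+	 * PMEM_CONNECT ioctl, the connected fd is then mmapped, and
+	 * sub-regions are exposed or hidden with PMEM_MAP/PMEM_UNMAP, which
+	 * only the owner of the master file may issue.) */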
+	if (!is_master_owner(file)) {
+#if PMEM_DEBUG
+		pr_err("pmem: remap requested from non-master process\n");
+#endif
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* check that the requested range is within the src allocation */
+	if (unlikely((region->offset > pmem[id].len(id, data)) ||
+		     (region->len > pmem[id].len(id, data)) ||
+		     (region->offset + region->len > pmem[id].len(id, data)))) {
+#if PMEM_DEBUG
+		pr_err("pmem: suballoc doesn't fit in src_file!\n");
+#endif
+		ret = -EINVAL;
+		goto err;
+	}
+
+	if (operation == PMEM_MAP) {
+		region_node = kmalloc(sizeof(struct pmem_region_node),
+			      GFP_KERNEL);
+		if (!region_node) {
+			ret = -ENOMEM;
+#if PMEM_DEBUG
+			pr_alert("pmem: No space to allocate remap metadata!\n");
+#endif
+			goto err;
+		}
+		region_node->region = *region;
+		list_add(&region_node->list, &data->region_list);
+	} else if (operation == PMEM_UNMAP) {
+		int found = 0;
+		list_for_each_safe(elt, elt2, &data->region_list) {
+			region_node = list_entry(elt, struct pmem_region_node,
+				      list);
+			if (region->len == 0 ||
+			    (region_node->region.offset == region->offset &&
+			    region_node->region.len == region->len)) {
+				list_del(elt);
+				kfree(region_node);
+				found = 1;
+			}
+		}
+		if (!found) {
+#if PMEM_DEBUG
+			pr_err("pmem: Unmap region does not map any"
+				" mapped region!\n");
+#endif
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	if (data->vma && PMEM_IS_SUBMAP(data)) {
+		if (operation == PMEM_MAP)
+			ret = pmem_remap_pfn_range(id, data->vma, data,
+				   region->offset, region->len);
+		else if (operation == PMEM_UNMAP)
+			ret = pmem_unmap_pfn_range(id, data->vma, data,
+				   region->offset, region->len);
+	}
+
+err:
+	pmem_unlock_data_and_mm(data, mm);
+	return ret;
+}
+
+static void pmem_revoke(struct file *file, struct pmem_data *data)
+{
+	struct pmem_region_node *region_node;
+	struct list_head *elt, *elt2;
+	struct mm_struct *mm = NULL;
+	int id = get_id(file);
+	int ret = 0;
+
+	data->master_file = NULL;
+	ret = pmem_lock_data_and_mm(file, data, &mm);
+	/* if lock_data_and_mm fails either the task that mapped the fd, or
+	 * the vma that mapped it have already gone away, nothing more
+	 * needs to be done */
+	if (ret)
+		return;
+	/* unmap everything */
+	/* delete the regions and the region list; nothing is mapped any more */
+	if (data->vma)
+		list_for_each_safe(elt, elt2, &data->region_list) {
+			region_node = list_entry(elt, struct pmem_region_node,
+						 list);
+			pmem_unmap_pfn_range(id, data->vma, data,
+					     region_node->region.offset,
+					     region_node->region.len);
+			list_del(elt);
+			kfree(region_node);
+	}
+	/* delete the master file */
+	pmem_unlock_data_and_mm(data, mm);
+}
+
+static void pmem_get_size(struct pmem_region *region, struct file *file)
+{
+	/* called via ioctl file op, so file guaranteed to be not NULL */
+	struct pmem_data *data = file->private_data;
+	int id = get_id(file);
+
+	down_read(&data->sem);
+	if (!has_allocation(file)) {
+		region->offset = 0;
+		region->len = 0;
+	} else {
+		region->offset = pmem[id].start_addr(id, data);
+		region->len = pmem[id].len(id, data);
+	}
+	up_read(&data->sem);
+	DLOG("offset 0x%lx len 0x%lx\n", region->offset, region->len);
+}
+
+
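+/*
+ * Rough userspace use of the ioctls handled below (illustrative only;
+ * error handling is omitted and the device node name depends on the
+ * board configuration):
+ *
+ *	int fd = open("/dev/pmem_adsp", O_RDWR);
+ *	void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ *			 MAP_SHARED, fd, 0);	size: multiple of PAGE_SIZE
+ *	struct pmem_region region;
+ *	ioctl(fd, PMEM_GET_PHYS, &region);	physical address + length
+ */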
+static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	/* called from user space as file op, so file guaranteed to be not
+	 * NULL
+	 */
+	struct pmem_data *data = file->private_data;
+	int id = get_id(file);
+#if PMEM_DEBUG_MSGS
+	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
+#endif
+
+	DLOG("pid %u(%s) file %p(%ld) cmd %#x, dev %s(id: %d)\n",
+		current->pid, get_task_comm(currtask_name, current),
+		file, file_count(file), cmd, get_name(file), id);
+
+	switch (cmd) {
+	case PMEM_GET_PHYS:
+		{
+			struct pmem_region region;
+
+			DLOG("get_phys\n");
+			down_read(&data->sem);
+			if (!has_allocation(file)) {
+				region.offset = 0;
+				region.len = 0;
+			} else {
+				region.offset = pmem[id].start_addr(id, data);
+				region.len = pmem[id].len(id, data);
+			}
+			up_read(&data->sem);
+
+			if (copy_to_user((void __user *)arg, &region,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+
+			DLOG("pmem: successful request for "
+				"physical address of pmem region id %d, "
+				"offset 0x%lx, len 0x%lx\n",
+				id, region.offset, region.len);
+
+			break;
+		}
+	case PMEM_MAP:
+		{
+			struct pmem_region region;
+			DLOG("map\n");
+			if (copy_from_user(&region, (void __user *)arg,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			return pmem_remap(&region, file, PMEM_MAP);
+		}
+		break;
+	case PMEM_UNMAP:
+		{
+			struct pmem_region region;
+			DLOG("unmap\n");
+			if (copy_from_user(&region, (void __user *)arg,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			return pmem_remap(&region, file, PMEM_UNMAP);
+			break;
+		}
+	case PMEM_GET_SIZE:
+		{
+			struct pmem_region region;
+			DLOG("get_size\n");
+			pmem_get_size(&region, file);
+			if (copy_to_user((void __user *)arg, &region,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			break;
+		}
+	case PMEM_GET_TOTAL_SIZE:
+		{
+			struct pmem_region region;
+			DLOG("get total size\n");
+			region.offset = 0;
+			get_id(file);
+			region.len = pmem[id].size;
+			if (copy_to_user((void __user *)arg, &region,
+						sizeof(struct pmem_region)))
+				return -EFAULT;
+			break;
+		}
+	case PMEM_GET_FREE_SPACE:
+		{
+			struct pmem_freespace fs;
+			DLOG("get freespace on %s(id: %d)\n",
+				get_name(file), id);
+
+			mutex_lock(&pmem[id].arena_mutex);
+			pmem[id].free_space(id, &fs);
+			mutex_unlock(&pmem[id].arena_mutex);
+
+			DLOG("%s(id: %d) total free %lu, largest %lu\n",
+				get_name(file), id, fs.total, fs.largest);
+
+			if (copy_to_user((void __user *)arg, &fs,
+				sizeof(struct pmem_freespace)))
+				return -EFAULT;
+			break;
+	}
+
+	case PMEM_ALLOCATE:
+		{
+			int ret = 0;
+			DLOG("allocate, id %d\n", id);
+			down_write(&data->sem);
+			if (has_allocation(file)) {
+				pr_err("pmem: Existing allocation found on "
+					"this file descriptor\n");
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			mutex_lock(&pmem[id].arena_mutex);
+			data->index = pmem_allocate_from_id(id,
+					arg,
+					SZ_4K);
+			mutex_unlock(&pmem[id].arena_mutex);
+			ret = data->index == -1 ? -ENOMEM :
+				data->index;
+			up_write(&data->sem);
+			return ret;
+		}
+	case PMEM_ALLOCATE_ALIGNED:
+		{
+			struct pmem_allocation alloc;
+			int ret = 0;
+
+			if (copy_from_user(&alloc, (void __user *)arg,
+						sizeof(struct pmem_allocation)))
+				return -EFAULT;
+			DLOG("allocate id align %d %u\n", id, alloc.align);
+			down_write(&data->sem);
+			if (has_allocation(file)) {
+				pr_err("pmem: Existing allocation found on "
+					"this file descriptor\n");
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
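+			/* a power of two has exactly one bit set, so
+			 * align & (align - 1) is non-zero iff align is not
+			 * a power of two (e.g. 0x3000 & 0x2fff == 0x2000) */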
+			if (alloc.align & (alloc.align - 1)) {
+				pr_err("pmem: Alignment is not a power of 2\n");
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			if (alloc.align != SZ_4K &&
+					(pmem[id].allocator_type !=
+						PMEM_ALLOCATORTYPE_BITMAP)) {
+				pr_err("pmem: Non 4k alignment requires bitmap"
+					" allocator on %s\n", pmem[id].name);
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			if (alloc.align > SZ_1M ||
+				alloc.align < SZ_4K) {
+				pr_err("pmem: Invalid alignment (%u) "
+					"specified\n", alloc.align);
+				up_write(&data->sem);
+				return -EINVAL;
+			}
+
+			mutex_lock(&pmem[id].arena_mutex);
+			data->index = pmem_allocate_from_id(id,
+				alloc.size,
+				alloc.align);
+			mutex_unlock(&pmem[id].arena_mutex);
+			ret = data->index == -1 ? -ENOMEM :
+				data->index;
+			up_write(&data->sem);
+			return ret;
+		}
+	case PMEM_CONNECT:
+		DLOG("connect\n");
+		return pmem_connect(arg, file);
+	case PMEM_CLEAN_INV_CACHES:
+	case PMEM_CLEAN_CACHES:
+	case PMEM_INV_CACHES:
+		{
+			struct pmem_addr pmem_addr;
+
+			if (copy_from_user(&pmem_addr, (void __user *)arg,
+						sizeof(struct pmem_addr)))
+				return -EFAULT;
+
+			return pmem_cache_maint(file, cmd, &pmem_addr);
+		}
+	default:
+		if (pmem[id].ioctl)
+			return pmem[id].ioctl(file, cmd, arg);
+
+		DLOG("ioctl invalid (%#x)\n", cmd);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void ioremap_pmem(int id)
+{
+	unsigned long addr;
+	const struct mem_type *type;
+
+	DLOG("PMEMDEBUG: ioremapping for %s\n", pmem[id].name);
+	if (pmem[id].map_on_demand) {
+		addr = (unsigned long)pmem[id].area->addr;
+		if (pmem[id].cached)
+			type = get_mem_type(MT_DEVICE_CACHED);
+		else
+			type = get_mem_type(MT_DEVICE);
+		DLOG("PMEMDEBUG: Remap phys %lx to virt %lx on %s\n",
+			pmem[id].base, addr, pmem[id].name);
+		if (ioremap_pages(addr, pmem[id].base, pmem[id].size, type)) {
+				pr_err("pmem: Failed to map pages\n");
+				BUG();
+		}
+		pmem[id].vbase = pmem[id].area->addr;
+		/* Flush the cache after installing page table entries to avoid
+		 * aliasing when these pages are remapped to user space.
+		 */
+		flush_cache_vmap(addr, addr + pmem[id].size);
+	} else {
+		if (pmem[id].cached)
+			pmem[id].vbase = ioremap_cached(pmem[id].base,
+						pmem[id].size);
+	#ifdef ioremap_ext_buffered
+		else if (pmem[id].buffered)
+			pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
+						pmem[id].size);
+	#endif
+		else
+			pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);
+	}
+}
+
+int pmem_setup(struct android_pmem_platform_data *pdata,
+	       long (*ioctl)(struct file *, unsigned int, unsigned long),
+	       int (*release)(struct inode *, struct file *))
+{
+	int i, index = 0, id;
+	struct vm_struct *pmem_vma = NULL;
+	struct page *page;
+
+	if (id_count >= PMEM_MAX_DEVICES) {
+		pr_alert("pmem: %s: unable to register driver(%s) - no more "
+			"devices available!\n", __func__, pdata->name);
+		goto err_no_mem;
+	}
+
+	if (!pdata->size) {
+		pr_alert("pmem: %s: unable to register pmem driver(%s) - zero "
+			"size passed in!\n", __func__, pdata->name);
+		goto err_no_mem;
+	}
+
+	id = id_count++;
+
+	pmem[id].id = id;
+
+	if (pmem[id].allocate) {
+		pr_alert("pmem: %s: unable to register pmem driver - "
+			"duplicate registration of %s!\n",
+			__func__, pdata->name);
+		goto err_no_mem;
+	}
+
+	pmem[id].allocator_type = pdata->allocator_type;
+
+	/* 'quantum' is a "hidden" variable that defaults to 0 in the board
+	 * files */
+	pmem[id].quantum = pdata->quantum ?: PMEM_MIN_ALLOC;
+	if (pmem[id].quantum < PMEM_MIN_ALLOC ||
+		!is_power_of_2(pmem[id].quantum)) {
+		pr_alert("pmem: %s: unable to register pmem driver %s - "
+			"invalid quantum value (%#x)!\n",
+			__func__, pdata->name, pmem[id].quantum);
+		goto err_reset_pmem_info;
+	}
+
+	if (pdata->size % pmem[id].quantum) {
+		/* bad alignment for size! */
+		pr_alert("pmem: %s: Unable to register driver %s - "
+			"memory region size (%#lx) is not a multiple of "
+			"quantum size(%#x)!\n", __func__, pdata->name,
+			pdata->size, pmem[id].quantum);
+		goto err_reset_pmem_info;
+	}
+
+	pmem[id].cached = pdata->cached;
+	pmem[id].buffered = pdata->buffered;
+	pmem[id].size = pdata->size;
+	pmem[id].memory_type = pdata->memory_type;
+	strlcpy(pmem[id].name, pdata->name, PMEM_NAME_SIZE);
+
+	pmem[id].num_entries = pmem[id].size / pmem[id].quantum;
+
+	memset(&pmem[id].kobj, 0, sizeof(pmem[0].kobj));
+	pmem[id].kobj.kset = pmem_kset;
+
+	switch (pmem[id].allocator_type) {
+	case PMEM_ALLOCATORTYPE_ALLORNOTHING:
+		pmem[id].allocate = pmem_allocator_all_or_nothing;
+		pmem[id].free = pmem_free_all_or_nothing;
+		pmem[id].free_space = pmem_free_space_all_or_nothing;
+		pmem[id].len = pmem_len_all_or_nothing;
+		pmem[id].start_addr = pmem_start_addr_all_or_nothing;
+		pmem[id].num_entries = 1;
+		pmem[id].quantum = pmem[id].size;
+		pmem[id].allocator.all_or_nothing.allocated = 0;
+
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_allornothing_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		break;
+
+	case PMEM_ALLOCATORTYPE_BUDDYBESTFIT:
+		pmem[id].allocator.buddy_bestfit.buddy_bitmap = kmalloc(
+			pmem[id].num_entries * sizeof(struct pmem_bits),
+			GFP_KERNEL);
+		if (!pmem[id].allocator.buddy_bestfit.buddy_bitmap)
+			goto err_reset_pmem_info;
+
+		memset(pmem[id].allocator.buddy_bestfit.buddy_bitmap, 0,
+			sizeof(struct pmem_bits) * pmem[id].num_entries);
+
+		for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--)
+			if ((pmem[id].num_entries) &  1<<i) {
+				PMEM_BUDDY_ORDER(id, index) = i;
+				index = PMEM_BUDDY_NEXT_INDEX(id, index);
+			}
+		pmem[id].allocate = pmem_allocator_buddy_bestfit;
+		pmem[id].free = pmem_free_buddy_bestfit;
+		pmem[id].free_space = pmem_free_space_buddy_bestfit;
+		pmem[id].len = pmem_len_buddy_bestfit;
+		pmem[id].start_addr = pmem_start_addr_buddy_bestfit;
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_buddy_bestfit_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		break;
+
+	case PMEM_ALLOCATORTYPE_BITMAP: /* 0, default if not explicit */
+		pmem[id].allocator.bitmap.bitm_alloc = kmalloc(
+			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS *
+				sizeof(*pmem[id].allocator.bitmap.bitm_alloc),
+			GFP_KERNEL);
+		if (!pmem[id].allocator.bitmap.bitm_alloc) {
+			pr_alert("pmem: %s: Unable to register pmem "
+					"driver %s - can't allocate "
+					"bitm_alloc!\n",
+					__func__, pdata->name);
+			goto err_reset_pmem_info;
+		}
+
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_bitmap_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		for (i = 0; i < PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS; i++) {
+			pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1;
+			pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0;
+		}
+
+		pmem[id].allocator.bitmap.bitmap_allocs =
+			PMEM_INITIAL_NUM_BITMAP_ALLOCATIONS;
+
+		pmem[id].allocator.bitmap.bitmap =
+			kcalloc((pmem[id].num_entries + 31) / 32,
+				sizeof(unsigned int), GFP_KERNEL);
+		if (!pmem[id].allocator.bitmap.bitmap) {
+			pr_alert("pmem: %s: Unable to register pmem "
+				"driver - can't allocate bitmap!\n",
+				__func__);
+			goto err_cant_register_device;
+		}
+		pmem[id].allocator.bitmap.bitmap_free = pmem[id].num_entries;
+
+		pmem[id].allocate = pmem_allocator_bitmap;
+		pmem[id].free = pmem_free_bitmap;
+		pmem[id].free_space = pmem_free_space_bitmap;
+		pmem[id].len = pmem_len_bitmap;
+		pmem[id].start_addr = pmem_start_addr_bitmap;
+
+		DLOG("bitmap allocator id %d (%s), num_entries %u, raw size "
+			"%lu, quanta size %u\n",
+			id, pdata->name, pmem[id].allocator.bitmap.bitmap_free,
+			pmem[id].size, pmem[id].quantum);
+		break;
+
+	case PMEM_ALLOCATORTYPE_SYSTEM:
+
+		INIT_LIST_HEAD(&pmem[id].allocator.system_mem.alist);
+
+		pmem[id].allocator.system_mem.used = 0;
+		pmem[id].vbase = NULL;
+
+		if (kobject_init_and_add(&pmem[id].kobj,
+				&pmem_system_ktype, NULL,
+				"%s", pdata->name))
+			goto out_put_kobj;
+
+		pmem[id].allocate = pmem_allocator_system;
+		pmem[id].free = pmem_free_system;
+		pmem[id].free_space = pmem_free_space_system;
+		pmem[id].len = pmem_len_system;
+		pmem[id].start_addr = pmem_start_addr_system;
+		pmem[id].num_entries = 0;
+		pmem[id].quantum = PAGE_SIZE;
+
+		DLOG("system allocator id %d (%s), raw size %lu\n",
+			id, pdata->name, pmem[id].size);
+		break;
+
+	default:
+		pr_alert("Invalid allocator type (%d) for pmem driver\n",
+			pdata->allocator_type);
+		goto err_reset_pmem_info;
+	}
+
+	pmem[id].ioctl = ioctl;
+	pmem[id].release = release;
+	mutex_init(&pmem[id].arena_mutex);
+	mutex_init(&pmem[id].data_list_mutex);
+	INIT_LIST_HEAD(&pmem[id].data_list);
+
+	pmem[id].dev.name = pdata->name;
+	pmem[id].dev.minor = id;
+	pmem[id].dev.fops = &pmem_fops;
+	pmem[id].reusable = pdata->reusable;
+	pr_info("pmem: Initializing %s as %s\n",
+		pdata->name, pdata->cached ? "cached" : "non-cached");
+
+	if (misc_register(&pmem[id].dev)) {
+		pr_alert("Unable to register pmem driver!\n");
+		goto err_cant_register_device;
+	}
+
+	if (!pmem[id].reusable) {
+		pmem[id].base = allocate_contiguous_memory_nomap(pmem[id].size,
+			pmem[id].memory_type, PAGE_SIZE);
+		if (!pmem[id].base) {
+			pr_err("pmem: Cannot allocate from reserved memory for %s\n",
+				pdata->name);
+			goto err_misc_deregister;
+		}
+	}
+
+	/* reusable pmem requires map on demand */
+	pmem[id].map_on_demand = pdata->map_on_demand || pdata->reusable;
+	if (pmem[id].map_on_demand) {
+		if (pmem[id].reusable) {
+			const struct fmem_data *fmem_info = fmem_get_info();
+			pmem[id].area = fmem_info->area;
+			pmem[id].base = fmem_info->phys;
+		} else {
+			pmem_vma = get_vm_area(pmem[id].size, VM_IOREMAP);
+			if (!pmem_vma) {
+				pr_err("pmem: Failed to allocate virtual space for "
+					"%s\n", pdata->name);
+				goto err_free;
+			}
+			pr_info("pmem: Reserving virtual address range %lx - %lx for"
+				" %s\n", (unsigned long) pmem_vma->addr,
+				(unsigned long) pmem_vma->addr + pmem[id].size,
+				pdata->name);
+			pmem[id].area = pmem_vma;
+		}
+	} else
+		pmem[id].area = NULL;
+
+	page = alloc_page(GFP_KERNEL);
+	if (!page) {
+		pr_err("pmem: Failed to allocate page for %s\n", pdata->name);
+		goto cleanup_vm;
+	}
+	pmem[id].garbage_pfn = page_to_pfn(page);
+	atomic_set(&pmem[id].allocation_cnt, 0);
+
+	if (pdata->setup_region)
+		pmem[id].region_data = pdata->setup_region();
+
+	if (pdata->request_region)
+		pmem[id].mem_request = pdata->request_region;
+
+	if (pdata->release_region)
+		pmem[id].mem_release = pdata->release_region;
+
+	pr_info("allocating %lu bytes at %lx physical for %s\n",
+		pmem[id].size, pmem[id].base, pmem[id].name);
+
+	return 0;
+
+cleanup_vm:
+	if (!pmem[id].reusable)
+		remove_vm_area(pmem_vma);
+err_free:
+	if (!pmem[id].reusable)
+		free_contiguous_memory_by_paddr(pmem[id].base);
+err_misc_deregister:
+	misc_deregister(&pmem[id].dev);
+err_cant_register_device:
+out_put_kobj:
+	kobject_put(&pmem[id].kobj);
+	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BUDDYBESTFIT)
+		kfree(pmem[id].allocator.buddy_bestfit.buddy_bitmap);
+	else if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BITMAP) {
+		kfree(pmem[id].allocator.bitmap.bitmap);
+		kfree(pmem[id].allocator.bitmap.bitm_alloc);
+	}
+err_reset_pmem_info:
+	pmem[id].allocate = 0;
+	pmem[id].dev.minor = -1;
+err_no_mem:
+	return -1;
+}
+
+static int pmem_probe(struct platform_device *pdev)
+{
+	struct android_pmem_platform_data *pdata;
+
+	if (!pdev || !pdev->dev.platform_data) {
+		pr_alert("Unable to probe pmem!\n");
+		return -1;
+	}
+	pdata = pdev->dev.platform_data;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return pmem_setup(pdata, NULL, NULL);
+}
+
+static int pmem_remove(struct platform_device *pdev)
+{
+	int id = pdev->id;
+	__free_page(pfn_to_page(pmem[id].garbage_pfn));
+	pm_runtime_disable(&pdev->dev);
+	if (pmem[id].vbase)
+		iounmap(pmem[id].vbase);
+	if (pmem[id].map_on_demand && !pmem[id].reusable && pmem[id].area)
+		free_vm_area(pmem[id].area);
+	if (pmem[id].base)
+		free_contiguous_memory_by_paddr(pmem[id].base);
+	kobject_put(&pmem[id].kobj);
+	if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BUDDYBESTFIT)
+		kfree(pmem[id].allocator.buddy_bestfit.buddy_bitmap);
+	else if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_BITMAP) {
+		kfree(pmem[id].allocator.bitmap.bitmap);
+		kfree(pmem[id].allocator.bitmap.bitm_alloc);
+	}
+	misc_deregister(&pmem[id].dev);
+	return 0;
+}
+
+static int pmem_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int pmem_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops pmem_dev_pm_ops = {
+	.runtime_suspend = pmem_runtime_suspend,
+	.runtime_resume = pmem_runtime_resume,
+};
+
+static struct platform_driver pmem_driver = {
+	.probe = pmem_probe,
+	.remove = pmem_remove,
+	.driver = { .name = "android_pmem",
+		    .pm = &pmem_dev_pm_ops,
+	}
+};
+
+
+static int __init pmem_init(void)
+{
+	/* create /sys/kernel/<PMEM_SYSFS_DIR_NAME> directory */
+	pmem_kset = kset_create_and_add(PMEM_SYSFS_DIR_NAME,
+		NULL, kernel_kobj);
+	if (!pmem_kset) {
+		pr_err("pmem(%s):kset_create_and_add fail\n", __func__);
+		return -ENOMEM;
+	}
+
+	return platform_driver_register(&pmem_driver);
+}
+
+static void __exit pmem_exit(void)
+{
+	platform_driver_unregister(&pmem_driver);
+}
+
+module_init(pmem_init);
+module_exit(pmem_exit);
+
diff --git a/drivers/misc/pmic8058-pwm.c b/drivers/misc/pmic8058-pwm.c
new file mode 100644
index 0000000..33407b7
--- /dev/null
+++ b/drivers/misc/pmic8058-pwm.c
@@ -0,0 +1,1085 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 PWM driver
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pwm.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/pmic8058-pwm.h>
+
+#define	PM8058_LPG_BANKS		8
+#define	PM8058_PWM_CHANNELS		PM8058_LPG_BANKS	/* MAX=8 */
+
+#define	PM8058_LPG_CTL_REGS		7
+
+/* PMIC8058 LPG/PWM */
+#define	SSBI_REG_ADDR_LPG_CTL_BASE	0x13C
+#define	SSBI_REG_ADDR_LPG_CTL(n)	(SSBI_REG_ADDR_LPG_CTL_BASE + (n))
+#define	SSBI_REG_ADDR_LPG_BANK_SEL	0x143
+#define	SSBI_REG_ADDR_LPG_BANK_EN	0x144
+#define	SSBI_REG_ADDR_LPG_LUT_CFG0	0x145
+#define	SSBI_REG_ADDR_LPG_LUT_CFG1	0x146
+#define	SSBI_REG_ADDR_LPG_TEST		0x147
+
+/* Control 0 */
+#define	PM8058_PWM_1KHZ_COUNT_MASK	0xF0
+#define	PM8058_PWM_1KHZ_COUNT_SHIFT	4
+
+#define	PM8058_PWM_1KHZ_COUNT_MAX	15
+
+#define	PM8058_PWM_OUTPUT_EN		0x08
+#define	PM8058_PWM_PWM_EN		0x04
+#define	PM8058_PWM_RAMP_GEN_EN		0x02
+#define	PM8058_PWM_RAMP_START		0x01
+
+#define	PM8058_PWM_PWM_START		(PM8058_PWM_OUTPUT_EN \
+					| PM8058_PWM_PWM_EN)
+#define	PM8058_PWM_RAMP_GEN_START	(PM8058_PWM_RAMP_GEN_EN \
+					| PM8058_PWM_RAMP_START)
+
+/* Control 1 */
+#define	PM8058_PWM_REVERSE_EN		0x80
+#define	PM8058_PWM_BYPASS_LUT		0x40
+#define	PM8058_PWM_HIGH_INDEX_MASK	0x3F
+
+/* Control 2 */
+#define	PM8058_PWM_LOOP_EN		0x80
+#define	PM8058_PWM_RAMP_UP		0x40
+#define	PM8058_PWM_LOW_INDEX_MASK	0x3F
+
+/* Control 3 */
+#define	PM8058_PWM_VALUE_BIT7_0		0xFF
+#define	PM8058_PWM_VALUE_BIT5_0		0x3F
+
+/* Control 4 */
+#define	PM8058_PWM_VALUE_BIT8		0x80
+
+#define	PM8058_PWM_CLK_SEL_MASK		0x60
+#define	PM8058_PWM_CLK_SEL_SHIFT	5
+
+#define	PM8058_PWM_CLK_SEL_NO		0
+#define	PM8058_PWM_CLK_SEL_1KHZ		1
+#define	PM8058_PWM_CLK_SEL_32KHZ	2
+#define	PM8058_PWM_CLK_SEL_19P2MHZ	3
+
+#define	PM8058_PWM_PREDIVIDE_MASK	0x18
+#define	PM8058_PWM_PREDIVIDE_SHIFT	3
+
+#define	PM8058_PWM_PREDIVIDE_2		0
+#define	PM8058_PWM_PREDIVIDE_3		1
+#define	PM8058_PWM_PREDIVIDE_5		2
+#define	PM8058_PWM_PREDIVIDE_6		3
+
+#define	PM8058_PWM_M_MASK	0x07
+#define	PM8058_PWM_M_MIN	0
+#define	PM8058_PWM_M_MAX	7
+
+/* Control 5 */
+#define	PM8058_PWM_PAUSE_COUNT_HI_MASK		0xFC
+#define	PM8058_PWM_PAUSE_COUNT_HI_SHIFT		2
+
+#define	PM8058_PWM_PAUSE_ENABLE_HIGH		0x02
+#define	PM8058_PWM_SIZE_9_BIT			0x01
+
+/* Control 6 */
+#define	PM8058_PWM_PAUSE_COUNT_LO_MASK		0xFC
+#define	PM8058_PWM_PAUSE_COUNT_LO_SHIFT		2
+
+#define	PM8058_PWM_PAUSE_ENABLE_LOW		0x02
+#define	PM8058_PWM_RESERVED			0x01
+
+#define	PM8058_PWM_PAUSE_COUNT_MAX		56 /* < 2^6 = 64*/
+
+/* LUT_CFG1 */
+#define	PM8058_PWM_LUT_READ			0x40
+
+/* TEST */
+#define	PM8058_PWM_DTEST_MASK		0x38
+#define	PM8058_PWM_DTEST_SHIFT		3
+
+#define	PM8058_PWM_DTEST_BANK_MASK	0x07
+
+/* PWM frequency support
+ *
+ * PWM Frequency = Clock Frequency / (N * T)
+ * 	or
+ * PWM Period = Clock Period * (N * T)
+ * 	where
+ * N = 2^9 or 2^6 for 9-bit or 6-bit PWM size
+ * T = Pre-divide * 2^m, m = 0..7 (exponent)
+ *
+ * We use this formula to figure out m for the best pre-divide and clock:
+ * (PWM Period / N) / 2^m = (Pre-divide * Clock Period)
+*/
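+/*
+ * Worked example (approximate, as a sanity check of the search in
+ * pm8058_pwm_calc_period() below): for a requested period of 1000 us the
+ * search settles on the 19.2 MHz clock, pre-divide 5, m = 3 and a 9-bit
+ * PWM, i.e. N * T = 512 * 5 * 2^3 = 20480 clock cycles, roughly 1067 us,
+ * the nearest period this divider chain can produce.
+ */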
+#define	NUM_CLOCKS	3
+
+#define	NSEC_1000HZ	(NSEC_PER_SEC / 1000)
+#define	NSEC_32768HZ	(NSEC_PER_SEC / 32768)
+#define	NSEC_19P2MHZ	(NSEC_PER_SEC / 19200000)
+
+#define	CLK_PERIOD_MIN	NSEC_19P2MHZ
+#define	CLK_PERIOD_MAX	NSEC_1000HZ
+
+#define	NUM_PRE_DIVIDE	3	/* No default support for pre-divide = 6 */
+
+#define	PRE_DIVIDE_0		2
+#define	PRE_DIVIDE_1		3
+#define	PRE_DIVIDE_2		5
+
+#define	PRE_DIVIDE_MIN		PRE_DIVIDE_0
+#define	PRE_DIVIDE_MAX		PRE_DIVIDE_2
+
+static char *clks[NUM_CLOCKS] = {
+	"1K", "32768", "19.2M"
+};
+
+static unsigned pre_div[NUM_PRE_DIVIDE] = {
+	PRE_DIVIDE_0, PRE_DIVIDE_1, PRE_DIVIDE_2
+};
+
+static unsigned int pt_t[NUM_PRE_DIVIDE][NUM_CLOCKS] = {
+	{	PRE_DIVIDE_0 * NSEC_1000HZ,
+		PRE_DIVIDE_0 * NSEC_32768HZ,
+		PRE_DIVIDE_0 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_1 * NSEC_1000HZ,
+		PRE_DIVIDE_1 * NSEC_32768HZ,
+		PRE_DIVIDE_1 * NSEC_19P2MHZ,
+	},
+	{	PRE_DIVIDE_2 * NSEC_1000HZ,
+		PRE_DIVIDE_2 * NSEC_32768HZ,
+		PRE_DIVIDE_2 * NSEC_19P2MHZ,
+	},
+};
+
+#define	MIN_MPT	((PRE_DIVIDE_MIN * CLK_PERIOD_MIN) << PM8058_PWM_M_MIN)
+#define	MAX_MPT	((PRE_DIVIDE_MAX * CLK_PERIOD_MAX) << PM8058_PWM_M_MAX)
+
+#define	CHAN_LUT_SIZE		(PM_PWM_LUT_SIZE / PM8058_PWM_CHANNELS)
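+/* The shared LUT is split statically: each of the 8 channels owns
+ * CHAN_LUT_SIZE consecutive entries (pm8058_pwm_config_duty_cycle()
+ * below indexes the table with pwm_id * CHAN_LUT_SIZE). */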
+
+/* Private data */
+struct pm8058_pwm_chip;
+
+struct pwm_device {
+	struct device		*dev;
+	int			pwm_id;		/* = bank/channel id */
+	int			in_use;
+	const char		*label;
+	struct pm8058_pwm_period	period;
+	int			pwm_value;
+	int			pwm_period;
+	int			use_lut;	/* Use LUT to output PWM */
+	u8			pwm_ctl[PM8058_LPG_CTL_REGS];
+	int			irq;
+	struct pm8058_pwm_chip  *chip;
+};
+
+struct pm8058_pwm_chip {
+	struct pwm_device	pwm_dev[PM8058_PWM_CHANNELS];
+	u8			bank_mask;
+	struct mutex		pwm_mutex;
+	struct pm8058_pwm_pdata	*pdata;
+};
+
+static struct pm8058_pwm_chip	*pwm_chip;
+
+struct pm8058_pwm_lut {
+	/* LUT parameters */
+	int	lut_duty_ms;
+	int	lut_lo_index;
+	int	lut_hi_index;
+	int	lut_pause_hi;
+	int	lut_pause_lo;
+	int	flags;
+};
+
+static u16 duty_msec[PM8058_PWM_1KHZ_COUNT_MAX + 1] = {
+	0, 1, 2, 3, 4, 6, 8, 16, 18, 24, 32, 36, 64, 128, 256, 512
+};
+
+static u16 pause_count[PM8058_PWM_PAUSE_COUNT_MAX + 1] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+	23, 28, 31, 42, 47, 56, 63, 83, 94, 111, 125, 167, 188, 222, 250, 333,
+	375, 500, 667, 750, 800, 900, 1000, 1100,
+	1200, 1300, 1400, 1500, 1600, 1800, 2000, 2500,
+	3000, 3500, 4000, 4500, 5000, 5500, 6000, 6500,
+	7000
+};
+
+/* Internal functions */
+static void pm8058_pwm_save(u8 *u8p, u8 mask, u8 val)
+{
+	*u8p &= ~mask;
+	*u8p |= val & mask;
+}
+
+static int pm8058_pwm_bank_enable(struct pwm_device *pwm, int enable)
+{
+	int	rc;
+	u8	reg;
+	struct pm8058_pwm_chip	*chip;
+
+	chip = pwm->chip;
+
+	if (enable)
+		reg = chip->bank_mask | (1 << pwm->pwm_id);
+	else
+		reg = chip->bank_mask & ~(1 << pwm->pwm_id);
+
+	rc = pm8xxx_writeb(pwm->dev->parent,
+				SSBI_REG_ADDR_LPG_BANK_EN, reg);
+	if (rc) {
+		pr_err("pm8xxx_write(): rc=%d (Enable LPG Bank)\n", rc);
+		goto bail_out;
+	}
+	chip->bank_mask = reg;
+
+bail_out:
+	return rc;
+}
+
+static int pm8058_pwm_bank_sel(struct pwm_device *pwm)
+{
+	int	rc;
+	u8	reg;
+
+	reg = pwm->pwm_id;
+	rc = pm8xxx_writeb(pwm->dev->parent,
+			SSBI_REG_ADDR_LPG_BANK_SEL, reg);
+	if (rc)
+		pr_err("pm8xxx_write(): rc=%d (Select PWM Bank)\n", rc);
+	return rc;
+}
+
+static int pm8058_pwm_start(struct pwm_device *pwm, int start, int ramp_start)
+{
+	int	rc;
+	u8	reg;
+
+	if (start) {
+		reg = pwm->pwm_ctl[0] | PM8058_PWM_PWM_START;
+		if (ramp_start)
+			reg |= PM8058_PWM_RAMP_GEN_START;
+		else
+			reg &= ~PM8058_PWM_RAMP_GEN_START;
+	} else {
+		reg = pwm->pwm_ctl[0] & ~PM8058_PWM_PWM_START;
+		reg &= ~PM8058_PWM_RAMP_GEN_START;
+	}
+
+	rc = pm8xxx_writeb(pwm->dev->parent, SSBI_REG_ADDR_LPG_CTL(0),
+									reg);
+	if (rc)
+		pr_err("pm8xxx_write(): rc=%d (Enable PWM Ctl 0)\n", rc);
+	else
+		pwm->pwm_ctl[0] = reg;
+	return rc;
+}
+
+static void pm8058_pwm_calc_period(unsigned int period_us,
+				   struct pm8058_pwm_period *period)
+{
+	int	n, m, clk, div;
+	int	best_m, best_div, best_clk;
+	int	last_err, cur_err, better_err, better_m;
+	unsigned int	tmp_p, last_p, min_err, period_n;
+
+	/* PWM Period / N : handle underflow or overflow */
+	if (period_us < (PM_PWM_PERIOD_MAX / NSEC_PER_USEC))
+		period_n = (period_us * NSEC_PER_USEC) >> 6;
+	else
+		period_n = (period_us >> 6) * NSEC_PER_USEC;
+	if (period_n >= MAX_MPT) {
+		n = 9;
+		period_n >>= 3;
+	} else
+		n = 6;
+
+	min_err = MAX_MPT;
+	best_m = 0;
+	best_clk = 0;
+	best_div = 0;
+	for (clk = 0; clk < NUM_CLOCKS; clk++) {
+		for (div = 0; div < NUM_PRE_DIVIDE; div++) {
+			tmp_p = period_n;
+			last_p = tmp_p;
+			for (m = 0; m <= PM8058_PWM_M_MAX; m++) {
+				if (tmp_p <= pt_t[div][clk]) {
+					/* Found local best */
+					if (!m) {
+						better_err = pt_t[div][clk] -
+							tmp_p;
+						better_m = m;
+					} else {
+						last_err = last_p -
+							pt_t[div][clk];
+						cur_err = pt_t[div][clk] -
+							tmp_p;
+
+						if (cur_err < last_err) {
+							better_err = cur_err;
+							better_m = m;
+						} else {
+							better_err = last_err;
+							better_m = m - 1;
+						}
+					}
+
+					if (better_err < min_err) {
+						min_err = better_err;
+						best_m = better_m;
+						best_clk = clk;
+						best_div = div;
+					}
+					break;
+				} else {
+					last_p = tmp_p;
+					tmp_p >>= 1;
+				}
+			}
+		}
+	}
+
+	/* Use higher resolution */
+	if (best_m >= 3 && n == 6) {
+		n += 3;
+		best_m -= 3;
+	}
+
+	period->pwm_size = n;
+	period->clk = best_clk;
+	period->pre_div = best_div;
+	period->pre_div_exp = best_m;
+
+	pr_debug("period=%u: n=%d, m=%d, clk[%d]=%s, div[%d]=%d\n",
+		 (unsigned)period_us, n, best_m,
+		 best_clk, clks[best_clk], best_div, pre_div[best_div]);
+}
+
+static void pm8058_pwm_calc_pwm_value(struct pwm_device *pwm,
+				      unsigned int period_us,
+				      unsigned int duty_us)
+{
+	unsigned int max_pwm_value, tmp;
+
+	/* Figure out pwm_value with overflow handling */
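+	/* e.g. duty_us = 500, period_us = 1000 with a 9-bit PWM gives
+	 * (500 << 9) / 1000 = 256, i.e. a 50% duty cycle */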
+	tmp = 1 << (sizeof(tmp) * 8 - pwm->period.pwm_size);
+	if (duty_us < tmp) {
+		tmp = duty_us << pwm->period.pwm_size;
+		pwm->pwm_value = tmp / period_us;
+	} else {
+		tmp = period_us >> pwm->period.pwm_size;
+		pwm->pwm_value = duty_us / tmp;
+	}
+	max_pwm_value = (1 << pwm->period.pwm_size) - 1;
+	if (pwm->pwm_value > max_pwm_value)
+		pwm->pwm_value = max_pwm_value;
+}
+
+static int pm8058_pwm_change_table(struct pwm_device *pwm, int duty_pct[],
+				   int start_idx, int len, int raw_value)
+{
+	unsigned int pwm_value, max_pwm_value;
+	u8	cfg0, cfg1;
+	int	i, pwm_size;
+	int	rc = 0;
+
+	pwm_size = (pwm->pwm_ctl[5] & PM8058_PWM_SIZE_9_BIT) ? 9 : 6;
+	max_pwm_value = (1 << pwm_size) - 1;
+	for (i = 0; i < len; i++) {
+		if (raw_value)
+			pwm_value = duty_pct[i];
+		else
+			pwm_value = (duty_pct[i] << pwm_size) / 100;
+
+		if (pwm_value > max_pwm_value)
+			pwm_value = max_pwm_value;
+		cfg0 = pwm_value;
+		cfg1 = (pwm_value >> 1) & 0x80;
+		cfg1 |= start_idx + i;
+
+		rc = pm8xxx_writeb(pwm->dev->parent,
+			     SSBI_REG_ADDR_LPG_LUT_CFG0, cfg0);
+		if (rc)
+			break;
+
+		rc = pm8xxx_writeb(pwm->dev->parent,
+			     SSBI_REG_ADDR_LPG_LUT_CFG1, cfg1);
+		if (rc)
+			break;
+	}
+	return rc;
+}
+
+static void pm8058_pwm_save_index(struct pwm_device *pwm,
+				   int low_idx, int high_idx, int flags)
+{
+	pwm->pwm_ctl[1] = high_idx & PM8058_PWM_HIGH_INDEX_MASK;
+	pwm->pwm_ctl[2] = low_idx & PM8058_PWM_LOW_INDEX_MASK;
+
+	if (flags & PM_PWM_LUT_REVERSE)
+		pwm->pwm_ctl[1] |= PM8058_PWM_REVERSE_EN;
+	if (flags & PM_PWM_LUT_RAMP_UP)
+		pwm->pwm_ctl[2] |= PM8058_PWM_RAMP_UP;
+	if (flags & PM_PWM_LUT_LOOP)
+		pwm->pwm_ctl[2] |= PM8058_PWM_LOOP_EN;
+}
+
+static void pm8058_pwm_save_period(struct pwm_device *pwm)
+{
+	u8	mask, val;
+
+	val = ((pwm->period.clk + 1) << PM8058_PWM_CLK_SEL_SHIFT)
+		& PM8058_PWM_CLK_SEL_MASK;
+	val |= (pwm->period.pre_div << PM8058_PWM_PREDIVIDE_SHIFT)
+		& PM8058_PWM_PREDIVIDE_MASK;
+	val |= pwm->period.pre_div_exp & PM8058_PWM_M_MASK;
+	mask = PM8058_PWM_CLK_SEL_MASK | PM8058_PWM_PREDIVIDE_MASK |
+		PM8058_PWM_M_MASK;
+	pm8058_pwm_save(&pwm->pwm_ctl[4], mask, val);
+
+	val = (pwm->period.pwm_size > 6) ? PM8058_PWM_SIZE_9_BIT : 0;
+	mask = PM8058_PWM_SIZE_9_BIT;
+	pm8058_pwm_save(&pwm->pwm_ctl[5], mask, val);
+}
+
+static void pm8058_pwm_save_pwm_value(struct pwm_device *pwm)
+{
+	u8	mask, val;
+
+	pwm->pwm_ctl[3] = pwm->pwm_value;
+
+	val = (pwm->period.pwm_size > 6) ? (pwm->pwm_value >> 1) : 0;
+	mask = PM8058_PWM_VALUE_BIT8;
+	pm8058_pwm_save(&pwm->pwm_ctl[4], mask, val);
+}
+
+static void pm8058_pwm_save_duty_time(struct pwm_device *pwm,
+				      struct pm8058_pwm_lut *lut)
+{
+	int	i;
+	u8	mask, val;
+
+	/* Linear search for duty time */
+	for (i = 0; i < PM8058_PWM_1KHZ_COUNT_MAX; i++) {
+		if (duty_msec[i] >= lut->lut_duty_ms)
+			break;
+	}
+	val = i << PM8058_PWM_1KHZ_COUNT_SHIFT;
+
+	mask = PM8058_PWM_1KHZ_COUNT_MASK;
+	pm8058_pwm_save(&pwm->pwm_ctl[0], mask, val);
+}
+
+static void pm8058_pwm_save_pause(struct pwm_device *pwm,
+				  struct pm8058_pwm_lut *lut)
+{
+	int	i, pause_cnt, time_cnt;
+	u8	mask, val;
+
+	time_cnt = (pwm->pwm_ctl[0] & PM8058_PWM_1KHZ_COUNT_MASK)
+				>> PM8058_PWM_1KHZ_COUNT_SHIFT;
+	if (lut->flags & PM_PWM_LUT_PAUSE_HI_EN) {
+		pause_cnt = (lut->lut_pause_hi + duty_msec[time_cnt] / 2)
+				/ duty_msec[time_cnt];
+		/* Linear search for pause time */
+		for (i = 0; i < PM8058_PWM_PAUSE_COUNT_MAX; i++) {
+			if (pause_count[i] >= pause_cnt)
+				break;
+		}
+		val = (i << PM8058_PWM_PAUSE_COUNT_HI_SHIFT) &
+			PM8058_PWM_PAUSE_COUNT_HI_MASK;
+		val |= PM8058_PWM_PAUSE_ENABLE_HIGH;
+	} else
+		val = 0;
+
+	mask = PM8058_PWM_PAUSE_COUNT_HI_MASK | PM8058_PWM_PAUSE_ENABLE_HIGH;
+	pm8058_pwm_save(&pwm->pwm_ctl[5], mask, val);
+
+	if (lut->flags & PM_PWM_LUT_PAUSE_LO_EN) {
+		/* Linear search for pause time */
+		pause_cnt = (lut->lut_pause_lo + duty_msec[time_cnt] / 2)
+				/ duty_msec[time_cnt];
+		for (i = 0; i < PM8058_PWM_PAUSE_COUNT_MAX; i++) {
+			if (pause_count[i] >= pause_cnt)
+				break;
+		}
+		val = (i << PM8058_PWM_PAUSE_COUNT_LO_SHIFT) &
+			PM8058_PWM_PAUSE_COUNT_LO_MASK;
+		val |= PM8058_PWM_PAUSE_ENABLE_LOW;
+	} else
+		val = 0;
+
+	mask = PM8058_PWM_PAUSE_COUNT_LO_MASK | PM8058_PWM_PAUSE_ENABLE_LOW;
+	pm8058_pwm_save(&pwm->pwm_ctl[6], mask, val);
+}
+
+static int pm8058_pwm_write(struct pwm_device *pwm, int start, int end)
+{
+	int	i, rc;
+
+	/* Write in reverse way so 0 would be the last */
+	for (i = end - 1; i >= start; i--) {
+		rc = pm8xxx_writeb(pwm->dev->parent,
+				  SSBI_REG_ADDR_LPG_CTL(i),
+				  pwm->pwm_ctl[i]);
+		if (rc) {
+			pr_err("pm8xxx_write(): rc=%d (PWM Ctl[%d])\n", rc, i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int pm8058_pwm_change_lut(struct pwm_device *pwm,
+				 struct pm8058_pwm_lut *lut)
+{
+	int	rc;
+
+	pm8058_pwm_save_index(pwm, lut->lut_lo_index,
+			     lut->lut_hi_index, lut->flags);
+	pm8058_pwm_save_duty_time(pwm, lut);
+	pm8058_pwm_save_pause(pwm, lut);
+	pm8058_pwm_save(&pwm->pwm_ctl[1], PM8058_PWM_BYPASS_LUT, 0);
+
+	pm8058_pwm_bank_sel(pwm);
+	rc = pm8058_pwm_write(pwm, 0, 7);
+
+	return rc;
+}
+
+/* APIs */
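+/*
+ * Typical use of the standard PWM calls below (a minimal sketch; the
+ * channel number and timings are arbitrary, error handling trimmed):
+ *
+ *	struct pwm_device *pwm = pwm_request(0, "backlight");
+ *	if (!IS_ERR(pwm)) {
+ *		pwm_config(pwm, 500, 1000);	500 us duty in a 1000 us period
+ *		pwm_enable(pwm);
+ *		...
+ *		pwm_disable(pwm);
+ *		pwm_free(pwm);
+ *	}
+ */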
+/*
+ * pwm_request - request a PWM device
+ */
+struct pwm_device *pwm_request(int pwm_id, const char *label)
+{
+	struct pwm_device	*pwm;
+
+	if (pwm_id >= PM8058_PWM_CHANNELS || pwm_id < 0)
+		return ERR_PTR(-EINVAL);
+	if (pwm_chip == NULL)
+		return ERR_PTR(-ENODEV);
+
+	mutex_lock(&pwm_chip->pwm_mutex);
+	pwm = &pwm_chip->pwm_dev[pwm_id];
+	if (!pwm->in_use) {
+		pwm->in_use = 1;
+		pwm->label = label;
+		pwm->use_lut = 0;
+
+		if (pwm_chip->pdata && pwm_chip->pdata->config)
+			pwm_chip->pdata->config(pwm, pwm_id, 1);
+	} else
+		pwm = ERR_PTR(-EBUSY);
+	mutex_unlock(&pwm_chip->pwm_mutex);
+
+	return pwm;
+}
+EXPORT_SYMBOL(pwm_request);
+
+/*
+ * pwm_free - free a PWM device
+ */
+void pwm_free(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
+		return;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		if (pwm->chip->pdata && pwm->chip->pdata->config)
+			pwm->chip->pdata->config(pwm, pwm->pwm_id, 0);
+
+		pwm->in_use = 0;
+		pwm->label = NULL;
+	}
+	pm8058_pwm_bank_enable(pwm, 0);
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL(pwm_free);
+
+/*
+ * pwm_config - change a PWM device configuration
+ *
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_us: duty cycle in microseconds
+ */
+int pwm_config(struct pwm_device *pwm, int duty_us, int period_us)
+{
+	int				rc;
+
+	if (pwm == NULL || IS_ERR(pwm) ||
+		duty_us > period_us ||
+		(unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN)
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (pwm->pwm_period != period_us) {
+		pm8058_pwm_calc_period(period_us, &pwm->period);
+		pm8058_pwm_save_period(pwm);
+		pwm->pwm_period = period_us;
+	}
+
+	pm8058_pwm_calc_pwm_value(pwm, period_us, duty_us);
+	pm8058_pwm_save_pwm_value(pwm);
+	pm8058_pwm_save(&pwm->pwm_ctl[1],
+			PM8058_PWM_BYPASS_LUT, PM8058_PWM_BYPASS_LUT);
+
+	pm8058_pwm_bank_sel(pwm);
+	rc = pm8058_pwm_write(pwm, 1, 6);
+
+	pr_debug("duty/period=%u/%u usec: pwm_value=%d (of %d)\n",
+		 (unsigned)duty_us, (unsigned)period_us,
+		 pwm->pwm_value, 1 << pwm->period.pwm_size);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_config);
+
+/*
+ * pwm_enable - start a PWM output toggling
+ */
+int pwm_enable(struct pwm_device *pwm)
+{
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (!pwm->in_use)
+		rc = -EINVAL;
+	else {
+		if (pwm->chip->pdata && pwm->chip->pdata->enable)
+			pwm->chip->pdata->enable(pwm, pwm->pwm_id, 1);
+
+		rc = pm8058_pwm_bank_enable(pwm, 1);
+
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 1, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pwm_enable);
+
+/*
+ * pwm_disable - stop a PWM output toggling
+ */
+void pwm_disable(struct pwm_device *pwm)
+{
+	if (pwm == NULL || IS_ERR(pwm) || pwm->chip == NULL)
+		return;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (pwm->in_use) {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		pm8058_pwm_bank_enable(pwm, 0);
+
+		if (pwm->chip->pdata && pwm->chip->pdata->enable)
+			pwm->chip->pdata->enable(pwm, pwm->pwm_id, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+}
+EXPORT_SYMBOL(pwm_disable);
+
+/**
+ * pm8058_pwm_config_period - change PWM period
+ *
+ * @pwm: the PWM device
+ * @period: period in struct pm8058_pwm_period
+ */
+int pm8058_pwm_config_period(struct pwm_device *pwm,
+			     struct pm8058_pwm_period *period)
+{
+	int			rc;
+
+	if (pwm == NULL || IS_ERR(pwm) || period == NULL)
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	pwm->period.pwm_size = period->pwm_size;
+	pwm->period.clk = period->clk;
+	pwm->period.pre_div = period->pre_div;
+	pwm->period.pre_div_exp = period->pre_div_exp;
+
+	pm8058_pwm_save_period(pwm);
+	pm8058_pwm_bank_sel(pwm);
+	rc = pm8058_pwm_write(pwm, 4, 6);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_config_period);
+
+/**
+ * pm8058_pwm_config_duty_cycle - change PWM duty cycle
+ *
+ * @pwm: the PWM device
+ * @pwm_value: the duty cycle in raw PWM value (< 2^pwm_size)
+ */
+int pm8058_pwm_config_duty_cycle(struct pwm_device *pwm, int pwm_value)
+{
+	struct pm8058_pwm_lut	lut;
+	int		flags, start_idx;
+	int		rc = 0;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use || !pwm->pwm_period) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (pwm->pwm_value == pwm_value)
+		goto out_unlock;
+
+	pwm->pwm_value = pwm_value;
+	flags = PM_PWM_LUT_RAMP_UP;
+
+	start_idx = pwm->pwm_id * CHAN_LUT_SIZE;
+	pm8058_pwm_change_table(pwm, &pwm_value, start_idx, 1, 1);
+
+	if (!pwm->use_lut) {
+		pwm->use_lut = 1;
+
+		lut.lut_duty_ms = 1;
+		lut.lut_lo_index = start_idx;
+		lut.lut_hi_index = start_idx;
+		lut.lut_pause_lo = 0;
+		lut.lut_pause_hi = 0;
+		lut.flags = flags;
+
+		rc = pm8058_pwm_change_lut(pwm, &lut);
+	} else {
+		pm8058_pwm_save_index(pwm, start_idx, start_idx, flags);
+		pm8058_pwm_save(&pwm->pwm_ctl[1], PM8058_PWM_BYPASS_LUT, 0);
+
+		pm8058_pwm_bank_sel(pwm);
+		rc = pm8058_pwm_write(pwm, 0, 3);
+	}
+
+	if (rc)
+		pr_err("[%d]: pm8058_pwm_write: rc=%d\n", pwm->pwm_id, rc);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_config_duty_cycle);
+
+/**
+ * pm8058_pwm_lut_config - change a PWM device configuration to use LUT
+ *
+ * @pwm: the PWM device
+ * @period_us: period in microseconds
+ * @duty_pct: array of duty cycles in percent, like 20, 50.
+ * @duty_time_ms: time for each duty cycle in milliseconds
+ * @start_idx: start index in the lookup table, from 0 to MAX-1
+ * @idx_len: number of LUT entries
+ * @pause_lo: pause time in milliseconds at the low index
+ * @pause_hi: pause time in milliseconds at the high index
+ * @flags: control flags
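+ *
+ * Example (a sketch only; the values are arbitrary): step an LED through
+ * LUT entries 0..3 with duty cycles 0/25/50/100%, 32 ms per entry, looping:
+ *
+ *	int duty[] = {0, 25, 50, 100};
+ *	pm8058_pwm_lut_config(pwm, 1000, duty, 32, 0, 4, 0, 0,
+ *			      PM_PWM_LUT_LOOP | PM_PWM_LUT_RAMP_UP);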
+ */
+int pm8058_pwm_lut_config(struct pwm_device *pwm, int period_us,
+			  int duty_pct[], int duty_time_ms, int start_idx,
+			  int idx_len, int pause_lo, int pause_hi, int flags)
+{
+	struct pm8058_pwm_lut		lut;
+	int	len;
+	int	rc;
+
+	if (pwm == NULL || IS_ERR(pwm) || !idx_len)
+		return -EINVAL;
+	if (duty_pct == NULL && !(flags & PM_PWM_LUT_NO_TABLE))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+	if (idx_len >= PM_PWM_LUT_SIZE && start_idx)
+		return -EINVAL;
+	if ((start_idx + idx_len) > PM_PWM_LUT_SIZE)
+		return -EINVAL;
+	if ((unsigned)period_us > PM_PWM_PERIOD_MAX ||
+		(unsigned)period_us < PM_PWM_PERIOD_MIN)
+		return -EINVAL;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+
+	if (!pwm->in_use) {
+		rc = -EINVAL;
+		goto out_unlock;
+	}
+
+	if (pwm->pwm_period != period_us) {
+		pm8058_pwm_calc_period(period_us, &pwm->period);
+		pm8058_pwm_save_period(pwm);
+		pwm->pwm_period = period_us;
+	}
+
+	len = (idx_len > PM_PWM_LUT_SIZE) ? PM_PWM_LUT_SIZE : idx_len;
+
+	if (flags & PM_PWM_LUT_NO_TABLE)
+		goto after_table_write;
+
+	rc = pm8058_pwm_change_table(pwm, duty_pct, start_idx, len, 0);
+	if (rc) {
+		pr_err("pm8058_pwm_change_table: rc=%d\n", rc);
+		goto out_unlock;
+	}
+
+after_table_write:
+	lut.lut_duty_ms = duty_time_ms;
+	lut.lut_lo_index = start_idx;
+	lut.lut_hi_index = start_idx + len - 1;
+	lut.lut_pause_lo = pause_lo;
+	lut.lut_pause_hi = pause_hi;
+	lut.flags = flags;
+
+	rc = pm8058_pwm_change_lut(pwm, &lut);
+
+out_unlock:
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_lut_config);
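+
+/*
+ * Illustrative use (an editor's sketch, not taken from a real board file):
+ * with a channel obtained through the legacy pwm_request() API, a four-step
+ * ramp could be programmed and started roughly as follows.  The table
+ * values, the 1000 us period and the 50 ms step time are arbitrary; the
+ * period only has to fall between PM_PWM_PERIOD_MIN and PM_PWM_PERIOD_MAX.
+ *
+ *	static int duty_pct[] = {10, 30, 60, 90};
+ *	int rc = pm8058_pwm_lut_config(pwm, 1000, duty_pct, 50,
+ *				       0, ARRAY_SIZE(duty_pct),
+ *				       0, 0, PM_PWM_LUT_RAMP_UP);
+ *	if (!rc)
+ *		rc = pm8058_pwm_lut_enable(pwm, 1);
+ */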
+
+/**
+ * pm8058_pwm_lut_enable - control a PWM device to start/stop LUT ramp
+ *
+ * @pwm: the PWM device
+ * @start: 1 to start the LUT ramp, 0 to stop it
+ */
+int pm8058_pwm_lut_enable(struct pwm_device *pwm, int start)
+{
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	mutex_lock(&pwm->chip->pwm_mutex);
+	if (start) {
+		pm8058_pwm_bank_enable(pwm, 1);
+
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 1, 1);
+	} else {
+		pm8058_pwm_bank_sel(pwm);
+		pm8058_pwm_start(pwm, 0, 0);
+
+		pm8058_pwm_bank_enable(pwm, 0);
+	}
+	mutex_unlock(&pwm->chip->pwm_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_pwm_lut_enable);
+
+#define SSBI_REG_ADDR_LED_BASE		0x131
+#define SSBI_REG_ADDR_LED(n)		(SSBI_REG_ADDR_LED_BASE + (n))
+#define SSBI_REG_ADDR_FLASH_BASE	0x48
+#define SSBI_REG_ADDR_FLASH_DRV_1	0xFB
+#define SSBI_REG_ADDR_FLASH(n)		(((n) < 2 ? \
+					    SSBI_REG_ADDR_FLASH_BASE + (n) : \
+					    SSBI_REG_ADDR_FLASH_DRV_1))
+
+#define PM8058_LED_CURRENT_SHIFT	3
+#define PM8058_LED_MODE_MASK		0x07
+
+#define PM8058_FLASH_CURRENT_SHIFT	4
+#define PM8058_FLASH_MODE_MASK		0x03
+#define PM8058_FLASH_MODE_NONE		0
+#define PM8058_FLASH_MODE_DTEST1	1
+#define PM8058_FLASH_MODE_DTEST2	2
+#define PM8058_FLASH_MODE_PWM		3
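+
+/*
+ * Note (inferred from the register packing below, not from documentation):
+ * pm8058_pwm_config_led() appears to take max_current in mA, with the LED
+ * sinks programmed in 2 mA steps (value / 2 shifted by 3) and the
+ * keypad/flash drivers in 20 mA steps (value / 20 shifted by 4), so e.g.
+ * asking for 40 mA on a flash output programs a current field of 2.
+ */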
+
+int pm8058_pwm_config_led(struct pwm_device *pwm, int id,
+			  int mode, int max_current)
+{
+	int	rc;
+	u8	conf;
+
+	switch (id) {
+	case PM_PWM_LED_0:
+	case PM_PWM_LED_1:
+	case PM_PWM_LED_2:
+		conf = mode & PM8058_LED_MODE_MASK;
+		conf |= (max_current / 2) << PM8058_LED_CURRENT_SHIFT;
+		rc = pm8xxx_writeb(pwm->dev->parent,
+				  SSBI_REG_ADDR_LED(id), conf);
+		break;
+
+	case PM_PWM_LED_KPD:
+	case PM_PWM_LED_FLASH:
+	case PM_PWM_LED_FLASH1:
+		switch (mode) {
+		case PM_PWM_CONF_PWM1:
+		case PM_PWM_CONF_PWM2:
+		case PM_PWM_CONF_PWM3:
+			conf = PM8058_FLASH_MODE_PWM;
+			break;
+		case PM_PWM_CONF_DTEST1:
+			conf = PM8058_FLASH_MODE_DTEST1;
+			break;
+		case PM_PWM_CONF_DTEST2:
+			conf = PM8058_FLASH_MODE_DTEST2;
+			break;
+		default:
+			conf = PM8058_FLASH_MODE_NONE;
+			break;
+		}
+		conf |= (max_current / 20) << PM8058_FLASH_CURRENT_SHIFT;
+		id -= PM_PWM_LED_KPD;
+		rc = pm8xxx_writeb(pwm->dev->parent,
+				  SSBI_REG_ADDR_FLASH(id), conf);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_config_led);
+
+int pm8058_pwm_set_dtest(struct pwm_device *pwm, int enable)
+{
+	int	rc;
+	u8	reg;
+
+	if (pwm == NULL || IS_ERR(pwm))
+		return -EINVAL;
+	if (pwm->chip == NULL)
+		return -ENODEV;
+
+	if (!pwm->in_use)
+		rc = -EINVAL;
+	else {
+		reg = pwm->pwm_id & PM8058_PWM_DTEST_BANK_MASK;
+		if (enable)
+			/* Only Test 1 available */
+			reg |= (1 << PM8058_PWM_DTEST_SHIFT) &
+				PM8058_PWM_DTEST_MASK;
+		rc = pm8xxx_writeb(pwm->dev->parent,
+			SSBI_REG_ADDR_LPG_TEST, reg);
+		if (rc)
+			pr_err("pm8xxx_write(DTEST=0x%x): rc=%d\n", reg, rc);
+
+	}
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_pwm_set_dtest);
+
+static int __devinit pmic8058_pwm_probe(struct platform_device *pdev)
+{
+	struct pm8058_pwm_chip	*chip;
+	int	i;
+
+	chip = kzalloc(sizeof *chip, GFP_KERNEL);
+	if (chip == NULL) {
+		pr_err("kzalloc() failed.\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < PM8058_PWM_CHANNELS; i++) {
+		chip->pwm_dev[i].pwm_id = i;
+		chip->pwm_dev[i].chip = chip;
+		chip->pwm_dev[i].dev = &pdev->dev;
+	}
+
+	mutex_init(&chip->pwm_mutex);
+
+	chip->pdata = pdev->dev.platform_data;
+	pwm_chip = chip;
+	platform_set_drvdata(pdev, chip);
+
+	pr_notice("OK\n");
+	return 0;
+}
+
+static int __devexit pmic8058_pwm_remove(struct platform_device *pdev)
+{
+	struct pm8058_pwm_chip	*chip = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	kfree(chip);
+	return 0;
+}
+
+static struct platform_driver pmic8058_pwm_driver = {
+	.probe		= pmic8058_pwm_probe,
+	.remove		= __devexit_p(pmic8058_pwm_remove),
+	.driver		= {
+		.name = "pm8058-pwm",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pm8058_pwm_init(void)
+{
+	return platform_driver_register(&pmic8058_pwm_driver);
+}
+
+static void __exit pm8058_pwm_exit(void)
+{
+	platform_driver_unregister(&pmic8058_pwm_driver);
+}
+
+subsys_initcall(pm8058_pwm_init);
+module_exit(pm8058_pwm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("PMIC8058 PWM driver");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:pmic8058_pwm");
diff --git a/drivers/misc/pmic8058-xoadc.c b/drivers/misc/pmic8058-xoadc.c
new file mode 100644
index 0000000..3452672
--- /dev/null
+++ b/drivers/misc/pmic8058-xoadc.c
@@ -0,0 +1,800 @@
+/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/msm_adc.h>
+#include <linux/mfd/pm8xxx/core.h>
+#include <linux/mfd/pmic8058.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/ratelimit.h>
+#include <linux/delay.h>
+#include <linux/wakelock.h>
+
+#include <mach/mpp.h>
+#include <mach/msm_xo.h>
+
+#define ADC_DRIVER_NAME			"pm8058-xoadc"
+
+#define MAX_QUEUE_LENGTH        0X15
+#define MAX_CHANNEL_PROPERTIES_QUEUE    0X7
+#define MAX_QUEUE_SLOT		0x1
+
+/* User Processor */
+#define ADC_ARB_USRP_CNTRL                      0x197
+	#define ADC_ARB_USRP_CNTRL_EN_ARB	BIT(0)
+	#define ADC_ARB_USRP_CNTRL_RSV1		BIT(1)
+	#define ADC_ARB_USRP_CNTRL_RSV2		BIT(2)
+	#define ADC_ARB_USRP_CNTRL_RSV3		BIT(3)
+	#define ADC_ARB_USRP_CNTRL_RSV4		BIT(4)
+	#define ADC_ARB_USRP_CNTRL_RSV5		BIT(5)
+	#define ADC_ARB_USRP_CNTRL_EOC		BIT(6)
+	#define ADC_ARB_USRP_CNTRL_REQ		BIT(7)
+
+#define ADC_ARB_USRP_AMUX_CNTRL         0x198
+#define ADC_ARB_USRP_ANA_PARAM          0x199
+#define ADC_ARB_USRP_DIG_PARAM          0x19A
+#define ADC_ARB_USRP_RSV                        0x19B
+
+#define ADC_ARB_USRP_DATA0                      0x19D
+#define ADC_ARB_USRP_DATA1                      0x19C
+
+struct pmic8058_adc {
+	struct device *dev;
+	struct xoadc_platform_data *pdata;
+	struct adc_properties *adc_prop;
+	struct xoadc_conv_state	conv[2];
+	int xoadc_queue_count;
+	int adc_irq;
+	struct linear_graph *adc_graph;
+	struct xoadc_conv_state *conv_slot_request;
+	struct xoadc_conv_state *conv_queue_list;
+	struct adc_conv_slot conv_queue_elements[MAX_QUEUE_LENGTH];
+	int xoadc_num;
+	struct msm_xo_voter *adc_voter;
+	struct wake_lock adc_wakelock;
+	/* flag to warn/bug if wakelocks are taken after suspend_noirq */
+	int msm_suspend_check;
+};
+
+static struct pmic8058_adc *pmic_adc[XOADC_PMIC_0 + 1];
+
+static bool xoadc_initialized, xoadc_calib_first_adc;
+
+DEFINE_RATELIMIT_STATE(pm8058_xoadc_msg_ratelimit,
+		DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);
+
+static inline int pm8058_xoadc_can_print(void)
+{
+	return __ratelimit(&pm8058_xoadc_msg_ratelimit);
+}
+
+int32_t pm8058_xoadc_registered(void)
+{
+	return xoadc_initialized;
+}
+EXPORT_SYMBOL(pm8058_xoadc_registered);
+
+void pm8058_xoadc_restore_slot(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_slot_request;
+
+	mutex_lock(&slot_state->list_lock);
+	list_add(&slot->list, &slot_state->slots);
+	mutex_unlock(&slot_state->list_lock);
+}
+EXPORT_SYMBOL(pm8058_xoadc_restore_slot);
+
+void pm8058_xoadc_slot_request(uint32_t adc_instance,
+					struct adc_conv_slot **slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_slot_request;
+
+	mutex_lock(&slot_state->list_lock);
+
+	if (!list_empty(&slot_state->slots)) {
+		*slot = list_first_entry(&slot_state->slots,
+				struct adc_conv_slot, list);
+		list_del(&(*slot)->list);
+	} else
+		*slot = NULL;
+
+	mutex_unlock(&slot_state->list_lock);
+}
+EXPORT_SYMBOL(pm8058_xoadc_slot_request);
+
+static int32_t pm8058_xoadc_arb_cntrl(uint32_t arb_cntrl,
+				uint32_t adc_instance, uint32_t channel)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	int i, rc;
+	u8 data_arb_cntrl;
+
+	data_arb_cntrl = ADC_ARB_USRP_CNTRL_EOC |
+			ADC_ARB_USRP_CNTRL_RSV5 |
+			ADC_ARB_USRP_CNTRL_RSV4;
+
+	if (arb_cntrl) {
+		if (adc_pmic->msm_suspend_check)
+			pr_err("XOADC request being made after suspend irq "
+				 "with channel id:%d\n", channel);
+		data_arb_cntrl |= ADC_ARB_USRP_CNTRL_EN_ARB;
+		msm_xo_mode_vote(adc_pmic->adc_voter, MSM_XO_MODE_ON);
+		adc_pmic->pdata->xoadc_mpp_config();
+		wake_lock(&adc_pmic->adc_wakelock);
+	}
+
+	/* Write twice to the CNTRL register for the arbiter settings
+	   to take effect */
+	for (i = 0; i < 2; i++) {
+		rc = pm8xxx_writeb(adc_pmic->dev->parent, ADC_ARB_USRP_CNTRL,
+							data_arb_cntrl);
+		if (rc < 0) {
+			pr_debug("%s: PM8058 write failed\n", __func__);
+			return rc;
+		}
+	}
+
+	if (!arb_cntrl) {
+		msm_xo_mode_vote(adc_pmic->adc_voter, MSM_XO_MODE_OFF);
+		wake_unlock(&adc_pmic->adc_wakelock);
+	}
+
+	return 0;
+}
+
+static int32_t pm8058_xoadc_configure(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	u8 data_arb_cntrl = 0, data_amux_chan = 0, data_arb_rsv = 0;
+	u8 data_dig_param = 0, data_ana_param2 = 0, data_ana_param = 0;
+	int rc;
+
+	rc = pm8058_xoadc_arb_cntrl(1, adc_instance, slot->chan_path);
+	if (rc < 0) {
+		pr_debug("%s: Configuring ADC Arbiter enable failed\n",
+				__func__);
+		return rc;
+	}
+
+	switch (slot->chan_path) {
+
+	case CHAN_PATH_TYPE1:
+		data_amux_chan = CHANNEL_VCOIN << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE2:
+		data_amux_chan = CHANNEL_VBAT << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE3:
+		data_amux_chan = CHANNEL_VCHG << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 10;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE4:
+		data_amux_chan = CHANNEL_CHG_MONITOR << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE5:
+		data_amux_chan = CHANNEL_VPH_PWR << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE6:
+		data_amux_chan = CHANNEL_MPP5 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[1];
+		break;
+
+	case CHAN_PATH_TYPE7:
+		data_amux_chan = CHANNEL_MPP6 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE8:
+		data_amux_chan = CHANNEL_MPP7 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE9:
+		data_amux_chan = CHANNEL_MPP8 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 2;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE10:
+		data_amux_chan = CHANNEL_MPP9 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE11:
+		data_amux_chan = CHANNEL_USB_VBUS << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 3;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE12:
+		data_amux_chan = CHANNEL_DIE_TEMP << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE13:
+		data_amux_chan = CHANNEL_125V << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE14:
+		data_amux_chan = CHANNEL_INTERNAL_2 << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+
+	case CHAN_PATH_TYPE_NONE:
+		data_amux_chan = CHANNEL_MUXOFF << 4;
+		data_arb_rsv = 0x10;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[1];
+		break;
+
+	case CHAN_PATH_TYPE15:
+		data_amux_chan = CHANNEL_INTERNAL << 4;
+		data_arb_rsv = 0x20;
+		slot->chan_properties.gain_numerator = 1;
+		slot->chan_properties.gain_denominator = 1;
+		slot->chan_properties.adc_graph = &adc_pmic->adc_graph[0];
+		break;
+	}
+
+	rc = pm8xxx_writeb(adc_pmic->dev->parent,
+			ADC_ARB_USRP_AMUX_CNTRL, data_amux_chan);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8xxx_writeb(adc_pmic->dev->parent,
+			ADC_ARB_USRP_RSV, data_arb_rsv);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	/* Set the default XO ADC digital clock rate to 2.4 MHz */
+	switch (slot->chan_adc_config) {
+
+	case ADC_CONFIG_TYPE1:
+		data_ana_param = 0xFE;
+		data_dig_param = 0x23;
+		data_ana_param2 = 0xFF;
+		/* AMUX register data to start the ADC conversion */
+		data_arb_cntrl = 0xF1;
+		break;
+
+	case ADC_CONFIG_TYPE2:
+		data_ana_param = 0xFE;
+		data_dig_param = 0x03;
+		data_ana_param2 = 0xFF;
+		/* AMUX register data to start the ADC conversion */
+		data_arb_cntrl = 0xF1;
+		break;
+	}
+
+	rc = pm8xxx_writeb(adc_pmic->dev->parent,
+				ADC_ARB_USRP_ANA_PARAM, data_ana_param);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8xxx_writeb(adc_pmic->dev->parent,
+			ADC_ARB_USRP_DIG_PARAM, data_dig_param);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8xxx_writeb(adc_pmic->dev->parent,
+			ADC_ARB_USRP_ANA_PARAM, data_ana_param2);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	enable_irq(adc_pmic->adc_irq);
+
+	rc = pm8xxx_writeb(adc_pmic->dev->parent,
+				ADC_ARB_USRP_CNTRL, data_arb_cntrl);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 write failed\n", __func__);
+		return rc;
+	}
+
+	return 0;
+}
+
+int32_t pm8058_xoadc_select_chan_and_start_conv(uint32_t adc_instance,
+					struct adc_conv_slot *slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+
+	if (!xoadc_initialized)
+		return -ENODEV;
+
+	mutex_lock(&slot_state->list_lock);
+	list_add_tail(&slot->list, &slot_state->slots);
+	if (adc_pmic->xoadc_queue_count == 0) {
+		if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+			adc_pmic->pdata->xoadc_vreg_set(1);
+		pm8058_xoadc_configure(adc_instance, slot);
+	}
+	adc_pmic->xoadc_queue_count++;
+	mutex_unlock(&slot_state->list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_select_chan_and_start_conv);
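+
+/*
+ * Conversion flow, as implemented in this file: callers queue a slot with
+ * pm8058_xoadc_select_chan_and_start_conv(); the first slot queued on an
+ * idle ADC programs the arbiter immediately.  The end-of-conversion
+ * interrupt (pm8058_xoadc() below) dequeues the slot and hands it to
+ * msm_adc_conv_cb(), and pm8058_xoadc_read_adc_code() then either starts
+ * the next queued conversion or, once the queue drains, drops the arbiter
+ * enable, the XO vote and the wakelock.
+ */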
+
+static int32_t pm8058_xoadc_dequeue_slot_request(uint32_t adc_instance,
+				struct adc_conv_slot **slot)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+	int rc = 0;
+
+	mutex_lock(&slot_state->list_lock);
+	if (adc_pmic->xoadc_queue_count > 0 &&
+			!list_empty(&slot_state->slots)) {
+		*slot = list_first_entry(&slot_state->slots,
+			struct adc_conv_slot, list);
+		list_del(&(*slot)->list);
+	} else
+		rc = -EINVAL;
+	mutex_unlock(&slot_state->list_lock);
+
+	if (rc < 0) {
+		if (pm8058_xoadc_can_print())
+			pr_err("Pmic 8058 xoadc spurious interrupt detected\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+int32_t pm8058_xoadc_read_adc_code(uint32_t adc_instance, int32_t *data)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct xoadc_conv_state *slot_state = adc_pmic->conv_queue_list;
+	uint8_t rslt_lsb, rslt_msb;
+	struct adc_conv_slot *slot;
+	int32_t rc, max_ideal_adc_code = 1 << adc_pmic->adc_prop->bitresolution;
+
+	if (!xoadc_initialized)
+		return -ENODEV;
+
+	rc = pm8xxx_readb(adc_pmic->dev->parent, ADC_ARB_USRP_DATA0,
+							&rslt_lsb);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 read failed\n", __func__);
+		return rc;
+	}
+
+	rc = pm8xxx_readb(adc_pmic->dev->parent, ADC_ARB_USRP_DATA1,
+							&rslt_msb);
+	if (rc < 0) {
+		pr_debug("%s: PM8058 read failed\n", __func__);
+		return rc;
+	}
+
+	*data = (rslt_msb << 8) | rslt_lsb;
+
+	/* Use the midpoint to determine underflow or overflow */
+	if (*data > max_ideal_adc_code + (max_ideal_adc_code >> 1))
+		*data |= ((1 << (8 * sizeof(*data) -
+			adc_pmic->adc_prop->bitresolution)) - 1) <<
+			adc_pmic->adc_prop->bitresolution;
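+	/*
+	 * Worked example, assuming a 15-bit converter: max_ideal_adc_code is
+	 * 0x8000, so any raw reading above 0xC000 is taken as a wrapped
+	 * negative value and bits 15..31 of *data are filled with ones,
+	 * sign-extending the result.
+	 */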
+	/* Return if this is a calibration run since there
+	 * is no need to check requests in the waiting queue */
+	if (xoadc_calib_first_adc)
+		return 0;
+
+	mutex_lock(&slot_state->list_lock);
+	adc_pmic->xoadc_queue_count--;
+	if (adc_pmic->xoadc_queue_count > 0) {
+		slot = list_first_entry(&slot_state->slots,
+				struct adc_conv_slot, list);
+		pm8058_xoadc_configure(adc_instance, slot);
+	}
+	mutex_unlock(&slot_state->list_lock);
+
+	mutex_lock(&slot_state->list_lock);
+	/* Default value for switching off the arbiter after reading
+	   the ADC value. Bit 0 set to 0. */
+	if (adc_pmic->xoadc_queue_count == 0) {
+		rc = pm8058_xoadc_arb_cntrl(0, adc_instance, CHANNEL_MUXOFF);
+		if (rc < 0) {
+			pr_debug("%s: Configuring ADC Arbiter disable failed\n",
+						__func__);
+			return rc;
+		}
+		if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+			adc_pmic->pdata->xoadc_vreg_set(0);
+	}
+	mutex_unlock(&slot_state->list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_read_adc_code);
+
+static irqreturn_t pm8058_xoadc(int irq, void *dev_id)
+{
+	struct pmic8058_adc *xoadc_8058 = dev_id;
+	struct adc_conv_slot *slot = NULL;
+	int rc;
+
+	disable_irq_nosync(xoadc_8058->adc_irq);
+
+	if (xoadc_calib_first_adc)
+		return IRQ_HANDLED;
+
+	rc = pm8058_xoadc_dequeue_slot_request(xoadc_8058->xoadc_num, &slot);
+
+	if (rc < 0)
+		return IRQ_NONE;
+
+	if (rc == 0)
+		msm_adc_conv_cb(slot, 0, NULL, 0);
+
+	return IRQ_HANDLED;
+}
+
+struct adc_properties *pm8058_xoadc_get_properties(uint32_t dev_instance)
+{
+	struct pmic8058_adc *xoadc_8058 = pmic_adc[dev_instance];
+
+	return xoadc_8058->adc_prop;
+}
+EXPORT_SYMBOL(pm8058_xoadc_get_properties);
+
+int32_t pm8058_xoadc_calib_device(uint32_t adc_instance)
+{
+	struct pmic8058_adc *adc_pmic = pmic_adc[adc_instance];
+	struct adc_conv_slot *slot;
+	int rc, offset_xoadc, slope_xoadc, calib_read_1, calib_read_2;
+
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(1);
+
+	pm8058_xoadc_slot_request(adc_instance, &slot);
+	if (slot) {
+		slot->chan_path = CHAN_PATH_TYPE13;
+		slot->chan_adc_config = ADC_CONFIG_TYPE2;
+		slot->chan_adc_calib = ADC_CONFIG_TYPE2;
+		xoadc_calib_first_adc = true;
+		rc = pm8058_xoadc_configure(adc_instance, slot);
+		if (rc) {
+			pr_err("pm8058_xoadc configure failed\n");
+			goto fail;
+		}
+	} else {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	msleep(3);
+
+	rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_1);
+	if (rc) {
+		pr_err("pm8058_xoadc read adc failed\n");
+		xoadc_calib_first_adc = false;
+		goto fail;
+	}
+	xoadc_calib_first_adc = false;
+
+	pm8058_xoadc_slot_request(adc_instance, &slot);
+	if (slot) {
+		slot->chan_path = CHAN_PATH_TYPE15;
+		slot->chan_adc_config = ADC_CONFIG_TYPE2;
+		slot->chan_adc_calib = ADC_CONFIG_TYPE2;
+		xoadc_calib_first_adc = true;
+		rc = pm8058_xoadc_configure(adc_instance, slot);
+		if (rc) {
+			pr_err("pm8058_xoadc configure failed\n");
+			goto fail;
+		}
+	} else {
+		rc = -EINVAL;
+		goto fail;
+	}
+
+	msleep(3);
+
+	rc = pm8058_xoadc_read_adc_code(adc_instance, &calib_read_2);
+	if (rc) {
+		pr_err("pm8058_xoadc read adc failed\n");
+		xoadc_calib_first_adc = false;
+		goto fail;
+	}
+	xoadc_calib_first_adc = false;
+
+	pm8058_xoadc_restore_slot(adc_instance, slot);
+
+	slope_xoadc = (((calib_read_1 - calib_read_2) << 10)/
+					CHANNEL_ADC_625_MV);
+	offset_xoadc = calib_read_2 -
+			((slope_xoadc * CHANNEL_ADC_625_MV) >> 10);
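+	/*
+	 * The slope is kept in Q10 fixed point.  Purely illustrative numbers,
+	 * assuming CHANNEL_ADC_625_MV is 625: readings of 16000 codes at
+	 * 1.25 V and 8000 codes at 0.625 V give slope_xoadc =
+	 * (8000 << 10) / 625 = 13107 and offset_xoadc =
+	 * 8000 - ((13107 * 625) >> 10) = 1.
+	 */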
+
+	printk(KERN_INFO "pmic8058_xoadc: The offset for AMUX calibration "
+						"was %d\n", offset_xoadc);
+
+	adc_pmic->adc_graph[0].offset = offset_xoadc;
+	adc_pmic->adc_graph[0].dy = (calib_read_1 - calib_read_2);
+	adc_pmic->adc_graph[0].dx = CHANNEL_ADC_625_MV;
+
+	/* Retain ideal calibration settings for therm readings */
+	adc_pmic->adc_graph[1].offset = 0;
+	adc_pmic->adc_graph[1].dy = (1 << 15) - 1;
+	adc_pmic->adc_graph[1].dx = 2200;
+
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(0);
+
+	return 0;
+fail:
+	if (adc_pmic->pdata->xoadc_vreg_set != NULL)
+		adc_pmic->pdata->xoadc_vreg_set(0);
+
+	return rc;
+}
+EXPORT_SYMBOL(pm8058_xoadc_calib_device);
+
+int32_t pm8058_xoadc_calibrate(uint32_t dev_instance,
+				struct adc_conv_slot *slot, int *calib_status)
+{
+	*calib_status = CALIB_NOT_REQUIRED;
+
+	return 0;
+}
+EXPORT_SYMBOL(pm8058_xoadc_calibrate);
+
+#ifdef CONFIG_PM
+static int pm8058_xoadc_suspend_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev);
+
+	adc_pmic->msm_suspend_check = 1;
+
+	return 0;
+}
+
+static int pm8058_xoadc_resume_noirq(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev);
+
+	adc_pmic->msm_suspend_check = 0;
+
+	return 0;
+}
+
+static const struct dev_pm_ops pm8058_xoadc_dev_pm_ops = {
+	.suspend_noirq = pm8058_xoadc_suspend_noirq,
+	.resume_noirq = pm8058_xoadc_resume_noirq,
+};
+
+#define PM8058_XOADC_DEV_PM_OPS	(&pm8058_xoadc_dev_pm_ops)
+#else
+#define PM8058_XOADC_DEV_PM_OPS NULL
+#endif
+
+static int __devinit pm8058_xoadc_probe(struct platform_device *pdev)
+{
+	struct xoadc_platform_data *pdata = pdev->dev.platform_data;
+	struct pmic8058_adc *adc_pmic;
+	int i, rc = 0;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data?\n");
+		return -EINVAL;
+	}
+
+	adc_pmic = devm_kzalloc(&pdev->dev, sizeof(*adc_pmic), GFP_KERNEL);
+	if (!adc_pmic) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	adc_pmic->dev = &pdev->dev;
+	adc_pmic->adc_prop = pdata->xoadc_prop;
+	adc_pmic->xoadc_num = pdata->xoadc_num;
+	adc_pmic->xoadc_queue_count = 0;
+
+	platform_set_drvdata(pdev, adc_pmic);
+
+	if (adc_pmic->xoadc_num > XOADC_PMIC_0) {
+		dev_err(&pdev->dev, "ADC device not supported\n");
+		return -EINVAL;
+	}
+
+	adc_pmic->pdata = pdata;
+	adc_pmic->adc_graph = devm_kzalloc(&pdev->dev,
+		sizeof(struct linear_graph) * MAX_CHANNEL_PROPERTIES_QUEUE,
+		GFP_KERNEL);
+	if (!adc_pmic->adc_graph) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Will be replaced by individual channel calibration */
+	for (i = 0; i < MAX_CHANNEL_PROPERTIES_QUEUE; i++) {
+		adc_pmic->adc_graph[i].offset = 0;
+		adc_pmic->adc_graph[i].dy = (1 << 15) - 1;
+		adc_pmic->adc_graph[i].dx = 2200;
+	}
+
+	if (pdata->xoadc_mpp_config != NULL)
+		pdata->xoadc_mpp_config();
+
+	adc_pmic->conv_slot_request = &adc_pmic->conv[0];
+	adc_pmic->conv_slot_request->context =
+		&adc_pmic->conv_queue_elements[0];
+
+	mutex_init(&adc_pmic->conv_slot_request->list_lock);
+	INIT_LIST_HEAD(&adc_pmic->conv_slot_request->slots);
+
+	/* tie each slot into the free list and initialize its work item */
+	for (i = 0; i < MAX_QUEUE_LENGTH; i++) {
+		list_add(&adc_pmic->conv_slot_request->context[i].list,
+					&adc_pmic->conv_slot_request->slots);
+		INIT_WORK(&adc_pmic->conv_slot_request->context[i].work,
+							msm_adc_wq_work);
+		init_completion(&adc_pmic->conv_slot_request->context[i].comp);
+		adc_pmic->conv_slot_request->context[i].idx = i;
+	}
+
+	adc_pmic->conv_queue_list = &adc_pmic->conv[1];
+
+	mutex_init(&adc_pmic->conv_queue_list->list_lock);
+	INIT_LIST_HEAD(&adc_pmic->conv_queue_list->slots);
+
+	adc_pmic->adc_irq = platform_get_irq(pdev, 0);
+	if (adc_pmic->adc_irq < 0)
+		return -ENXIO;
+
+	rc = request_threaded_irq(adc_pmic->adc_irq,
+				NULL, pm8058_xoadc,
+		IRQF_TRIGGER_RISING, "pm8058_adc_interrupt", adc_pmic);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request adc irq\n");
+		return rc;
+	}
+
+	disable_irq(adc_pmic->adc_irq);
+
+	if (adc_pmic->adc_voter == NULL) {
+		adc_pmic->adc_voter = msm_xo_get(MSM_XO_TCXO_D1,
+							"pmic8058_xoadc");
+		if (IS_ERR(adc_pmic->adc_voter)) {
+			dev_err(&pdev->dev, "Failed to get XO vote\n");
+			return PTR_ERR(adc_pmic->adc_voter);
+		}
+	}
+
+	device_init_wakeup(&pdev->dev, pdata->xoadc_wakeup);
+	wake_lock_init(&adc_pmic->adc_wakelock, WAKE_LOCK_SUSPEND,
+					"pmic8058_xoadc_wakelock");
+
+	pmic_adc[adc_pmic->xoadc_num] = adc_pmic;
+
+	if (pdata->xoadc_vreg_setup != NULL)
+		pdata->xoadc_vreg_setup();
+
+	xoadc_initialized = true;
+	xoadc_calib_first_adc = false;
+
+	return 0;
+}
+
+static int __devexit pm8058_xoadc_teardown(struct platform_device *pdev)
+{
+	struct pmic8058_adc *adc_pmic = platform_get_drvdata(pdev);
+
+	if (adc_pmic->pdata->xoadc_vreg_shutdown != NULL)
+		adc_pmic->pdata->xoadc_vreg_shutdown();
+
+	wake_lock_destroy(&adc_pmic->adc_wakelock);
+	msm_xo_put(adc_pmic->adc_voter);
+	device_init_wakeup(&pdev->dev, 0);
+	xoadc_initialized = false;
+
+	return 0;
+}
+
+static struct platform_driver pm8058_xoadc_driver = {
+	.probe = pm8058_xoadc_probe,
+	.remove = __devexit_p(pm8058_xoadc_teardown),
+	.driver = {
+		.name = "pm8058-xoadc",
+		.owner = THIS_MODULE,
+		.pm = PM8058_XOADC_DEV_PM_OPS,
+	},
+};
+
+static int __init pm8058_xoadc_init(void)
+{
+	return platform_driver_register(&pm8058_xoadc_driver);
+}
+module_init(pm8058_xoadc_init);
+
+static void __exit pm8058_xoadc_exit(void)
+{
+	platform_driver_unregister(&pm8058_xoadc_driver);
+}
+module_exit(pm8058_xoadc_exit);
+
+MODULE_ALIAS("platform:pmic8058_xoadc");
+MODULE_DESCRIPTION("PMIC8058 XOADC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/qfp_fuse.c b/drivers/misc/qfp_fuse.c
new file mode 100644
index 0000000..341e5b2
--- /dev/null
+++ b/drivers/misc/qfp_fuse.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/qfp_fuse.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+
+/*
+ * Time QFPROM requires to reliably burn a fuse.
+ */
+#define QFPROM_BLOW_TIMEOUT_US      10
+#define QFPROM_BLOW_TIMER_OFFSET    0x2038
+/*
+ * Denotes number of cycles required to blow the fuse.
+ */
+#define QFPROM_BLOW_TIMER_VALUE     (QFPROM_BLOW_TIMEOUT_US * 83)
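+/*
+ * 10 us * 83 cycles/us = 830 timer cycles; the factor of 83 implies the
+ * blow timer is clocked at roughly 83 MHz, though that is inferred from
+ * the constant rather than stated anywhere in this file.
+ */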
+
+#define QFPROM_BLOW_STATUS_OFFSET   0x204C
+#define QFPROM_BLOW_STATUS_BUSY     0x01
+#define QFPROM_BLOW_STATUS_ERROR    0x02
+
+#define QFP_FUSE_READY              0x01
+#define QFP_FUSE_OFF                0x00
+
+struct qfp_priv_t {
+	uint32_t base;
+	uint32_t end;
+	struct mutex lock;
+	struct regulator *fuse_vdd;
+	u8 state;
+};
+
+/* We need only one instance of this for the driver */
+static struct qfp_priv_t *qfp_priv;
+
+
+static int qfp_fuse_open(struct inode *inode, struct file *filp)
+{
+	if (qfp_priv == NULL)
+		return -ENODEV;
+
+	filp->private_data = qfp_priv;
+
+	return 0;
+}
+
+static int qfp_fuse_release(struct inode *inode, struct file *filp)
+{
+
+	filp->private_data = NULL;
+
+	return 0;
+}
+
+static inline int qfp_fuse_wait_for_fuse_blow(u32 *status)
+{
+	u32 timeout = QFPROM_BLOW_TIMEOUT_US;
+	/* wait for 400us before checking for the first time */
+	udelay(400);
+	do {
+		*status = readl_relaxed(
+			qfp_priv->base + QFPROM_BLOW_STATUS_OFFSET);
+
+		if (!(*status & QFPROM_BLOW_STATUS_BUSY))
+			return 0;
+
+		timeout--;
+		udelay(1);
+	} while (timeout);
+	pr_err("Timeout waiting for FUSE blow, status = %x\n", *status);
+	return -ETIMEDOUT;
+}
+
+static inline int qfp_fuse_enable_regulator(void)
+{
+	int err;
+	err = regulator_enable(qfp_priv->fuse_vdd);
+	if (err != 0)
+		pr_err("Error (%d) enabling regulator\n", err);
+	return err;
+}
+
+static inline int qfp_fuse_disable_regulator(void)
+{
+	int err;
+	err = regulator_disable(qfp_priv->fuse_vdd);
+	if (err != 0)
+		pr_err("Error (%d) disabling regulator\n", err);
+	return err;
+}
+
+static int qfp_fuse_write_word(u32 *addr, u32 data)
+{
+	u32 blow_status = 0;
+	u32 read_data;
+	int err;
+
+	/* Set QFPROM  blow timer register */
+	writel_relaxed(QFPROM_BLOW_TIMER_VALUE,
+			qfp_priv->base + QFPROM_BLOW_TIMER_OFFSET);
+	mb();
+
+	/* Enable LVS0 regulator */
+	err = qfp_fuse_enable_regulator();
+	if (err != 0)
+		return err;
+
+	/*
+	 * Wait for about 1ms. However msleep(1) can sleep for
+	 * up to 20ms as per Documentation/timers/timers-howto.txt.
+	 * Time is not a constraint here.
+	 */
+
+	msleep(20);
+
+	/* Write data */
+	__raw_writel(data, addr);
+	mb();
+
+	/* blow_status = QFPROM_BLOW_STATUS_BUSY; */
+	err = qfp_fuse_wait_for_fuse_blow(&blow_status);
+	if (err) {
+		qfp_fuse_disable_regulator();
+		return err;
+	}
+
+	/* Check error status */
+	if (blow_status & QFPROM_BLOW_STATUS_ERROR) {
+		pr_err("Fuse blow status error: %d\n", blow_status);
+		qfp_fuse_disable_regulator();
+		return -EFAULT;
+	}
+
+	/* Disable regulator */
+	qfp_fuse_disable_regulator();
+	/*
+	 * Wait for about 1ms. However msleep(1) can sleep for
+	 * up to 20ms as per Documentation/timers/timers-howto.txt.
+	 * Time is not a constraint here.
+	 */
+	msleep(20);
+
+	/* Verify written data */
+	read_data = readl_relaxed(addr);
+	if (read_data != data) {
+		pr_err("Error: read/write data mismatch\n");
+		pr_err("Address = %p written data = %x read data = %x\n",
+			addr, data, read_data);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static long
+qfp_fuse_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qfp_fuse_req req;
+	u32 *buf = NULL;
+	int i;
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QFP_FUSE_IOC_MAGIC)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case QFP_FUSE_IOC_READ:
+		if (arg == 0) {
+			pr_err("user space arg not supplied\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+			pr_err("Error copying req from user space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Check for limits */
+		if (!req.size) {
+			pr_err("Request size zero.\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
+				qfp_priv->end) {
+			pr_err("Req size exceeds QFPROM addr space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Allocate memory for buffer */
+		buf = kzalloc(req.size * 4, GFP_KERNEL);
+		if (buf == NULL) {
+			pr_alert("No memory for data\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&qfp_priv->lock)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		/* Read data */
+		for (i = 0; i < req.size; i++)
+			buf[i] = readl_relaxed(
+				((u32 *) (qfp_priv->base + req.offset)) + i);
+
+		if (copy_to_user((void __user *)req.data, buf, 4*(req.size))) {
+			pr_err("Error copying to user space\n");
+			err = -EFAULT;
+		}
+
+		mutex_unlock(&qfp_priv->lock);
+		break;
+
+	case QFP_FUSE_IOC_WRITE:
+		if (arg == 0) {
+			pr_err("user space arg not supplied\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (copy_from_user(&req, (void __user *)arg, sizeof(req))) {
+			pr_err("Error copying req from user space\n");
+			err = -EFAULT;
+			break;
+		}
+		/* Check for limits */
+		if (!req.size) {
+			pr_err("Request size zero.\n");
+			err = -EFAULT;
+			break;
+		}
+		if (qfp_priv->base + req.offset + (req.size - 1) * 4 >
+				qfp_priv->end) {
+			pr_err("Req size exceeds QFPROM space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		/* Allocate memory for buffer */
+		buf = kzalloc(4 * (req.size), GFP_KERNEL);
+		if (buf == NULL) {
+			pr_alert("No memory for data\n");
+			err = -ENOMEM;
+			break;
+		}
+
+		/* Copy user data to local buffer */
+		if (copy_from_user(buf, (void __user *)req.data,
+				4 * (req.size))) {
+			pr_err("Error copying data from user space\n");
+			err = -EFAULT;
+			break;
+		}
+
+		if (mutex_lock_interruptible(&qfp_priv->lock)) {
+			err = -ERESTARTSYS;
+			break;
+		}
+
+		/* Write data word at a time */
+		for (i = 0; i < req.size && !err; i++) {
+			err = qfp_fuse_write_word(((u32 *) (
+				qfp_priv->base + req.offset) + i), buf[i]);
+		}
+
+		mutex_unlock(&qfp_priv->lock);
+		break;
+	default:
+		pr_err("Invalid ioctl command.\n");
+		return -ENOTTY;
+	}
+	kfree(buf);
+	return err;
+}
+
+static const struct file_operations qfp_fuse_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qfp_fuse_ioctl,
+	.open = qfp_fuse_open,
+	.release = qfp_fuse_release
+};
+
+static struct miscdevice qfp_fuse_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "qfpfuse",
+	.fops = &qfp_fuse_fops
+};
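+
+/*
+ * Intended user-space usage (an editor's sketch; field names follow the
+ * accesses in qfp_fuse_ioctl() above): the misc device shows up as
+ * /dev/qfpfuse, req.offset is a byte offset from the QFPROM base, req.size
+ * counts 32-bit words and req.data points at a user buffer of that many
+ * words:
+ *
+ *	struct qfp_fuse_req req = {
+ *		.offset = 0x10,
+ *		.size   = 2,
+ *		.data   = words,
+ *	};
+ *	if (ioctl(fd, QFP_FUSE_IOC_READ, &req) < 0)
+ *		perror("QFP_FUSE_IOC_READ");
+ */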
+
+
+static int qfp_fuse_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res;
+	const char *regulator_name = pdev->dev.platform_data;
+
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	if (!regulator_name)
+		return -EINVAL;
+
+	/* Initialize */
+	qfp_priv = kzalloc(sizeof(struct qfp_priv_t), GFP_KERNEL);
+
+	if (qfp_priv == NULL) {
+		pr_alert("Not enough memory to initialize device\n");
+		return -ENOMEM;
+	}
+
+	/* The driver is passed an ioremapped address */
+	qfp_priv->base = res->start;
+	qfp_priv->end = res->end;
+
+	/* Get regulator for QFPROM writes */
+	qfp_priv->fuse_vdd = regulator_get(NULL, regulator_name);
+	if (IS_ERR(qfp_priv->fuse_vdd)) {
+		ret = PTR_ERR(qfp_priv->fuse_vdd);
+		pr_err("Err (%d) getting %s\n", ret, regulator_name);
+		qfp_priv->fuse_vdd = NULL;
+		goto err;
+	}
+
+	mutex_init(&qfp_priv->lock);
+
+	ret = misc_register(&qfp_fuse_dev);
+	if (ret < 0)
+		goto err;
+
+	pr_info("Fuse driver base:%x end:%x\n", qfp_priv->base, qfp_priv->end);
+	return 0;
+
+err:
+	if (qfp_priv->fuse_vdd)
+		regulator_put(qfp_priv->fuse_vdd);
+
+	kfree(qfp_priv);
+	qfp_priv = NULL;
+
+	return ret;
+
+}
+
+static int __devexit qfp_fuse_remove(struct platform_device *plat)
+{
+	if (qfp_priv && qfp_priv->fuse_vdd)
+		regulator_put(qfp_priv->fuse_vdd);
+
+	kfree(qfp_priv);
+	qfp_priv = NULL;
+
+	misc_deregister(&qfp_fuse_dev);
+	pr_info("Removing Fuse driver\n");
+	return 0;
+}
+
+static struct platform_driver qfp_fuse_driver = {
+	.probe = qfp_fuse_probe,
+	.remove = qfp_fuse_remove,
+	.driver = {
+		.name = "qfp_fuse_driver",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init qfp_fuse_init(void)
+{
+	return platform_driver_register(&qfp_fuse_driver);
+}
+
+static void __exit qfp_fuse_exit(void)
+{
+	platform_driver_unregister(&qfp_fuse_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rohit Vaswani <rvaswani@codeaurora.org>");
+MODULE_DESCRIPTION("Driver to read/write to QFPROM fuses.");
+MODULE_VERSION("1.01");
+
+module_init(qfp_fuse_init);
+module_exit(qfp_fuse_exit);
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
new file mode 100644
index 0000000..4c92ee5
--- /dev/null
+++ b/drivers/misc/qseecom.c
@@ -0,0 +1,1781 @@
+
+
+/* Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/qseecom.h>
+#include <linux/freezer.h>
+#include <mach/board.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/scm.h>
+#include <mach/peripheral-loader.h>
+#include "qseecom_legacy.h"
+
+#define QSEECOM_DEV			"qseecom"
+#define QSEOS_VERSION_13		0x13
+#define QSEOS_VERSION_14		0x14
+#define QSEOS_CHECK_VERSION_CMD		0x00001803
+
+enum qseecom_command_scm_resp_type {
+	QSEOS_APP_ID = 0xEE01,
+	QSEOS_LISTENER_ID
+};
+
+enum qseecom_qceos_cmd_id {
+	QSEOS_APP_START_COMMAND      = 0x01,
+	QSEOS_APP_SHUTDOWN_COMMAND,
+	QSEOS_APP_LOOKUP_COMMAND,
+	QSEOS_REGISTER_LISTENER,
+	QSEOS_DEREGISTER_LISTENER,
+	QSEOS_CLIENT_SEND_DATA_COMMAND,
+	QSEOS_LISTENER_DATA_RSP_COMMAND,
+	QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_CMD_MAX     = 0xEFFFFFFF
+};
+
+enum qseecom_qceos_cmd_status {
+	QSEOS_RESULT_SUCCESS = 0,
+	QSEOS_RESULT_INCOMPLETE,
+	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
+};
+
+enum qseecom_clk_definitions {
+	CLK_DFAB = 0,
+	CLK_SFPB,
+};
+
+__packed struct qseecom_check_app_ireq {
+	uint32_t qsee_cmd_id;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_load_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;		/* Length of the mdt file */
+	uint32_t img_len;		/* Length of .bxx and .mdt files */
+	uint32_t phy_addr;		/* phy addr of the start of image */
+	char     app_name[MAX_APP_NAME_SIZE];	/* application name*/
+};
+
+__packed struct qseecom_unload_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  app_id;
+};
+
+__packed struct qseecom_register_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	void *sb_ptr;
+	uint32_t sb_len;
+};
+
+__packed struct qseecom_unregister_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  listener_id;
+};
+
+__packed struct qseecom_client_send_data_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	void *req_ptr;
+	uint32_t req_len;
+	void *rsp_ptr;   /* First 4 bytes should always be the return status */
+	uint32_t rsp_len;
+};
+
+/* send_data resp */
+__packed struct qseecom_client_listener_data_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+};
+
+/*
+ * struct qseecom_command_scm_resp - qseecom response buffer
+ * @result: command status, from enum qseecom_qceos_cmd_status
+ * @resp_type: type of the returned data, from enum
+ *             qseecom_command_scm_resp_type
+ * @data: application id or listener id carried by the response
+ */
+__packed struct qseecom_command_scm_resp {
+	uint32_t result;
+	enum qseecom_command_scm_resp_type resp_type;
+	unsigned int data;
+};
+
+static struct class *driver_class;
+static dev_t qseecom_device_no;
+static struct cdev qseecom_cdev;
+
+/* Data structures used in legacy support */
+static void *pil;
+static uint32_t pil_ref_cnt;
+static DEFINE_MUTEX(pil_access_lock);
+
+static DEFINE_MUTEX(send_msg_lock);
+static DEFINE_MUTEX(qsee_bw_mutex);
+static DEFINE_MUTEX(qsee_sfpb_bw_mutex);
+static DEFINE_MUTEX(app_access_lock);
+
+static int qsee_bw_count;
+static int qsee_sfpb_bw_count;
+static struct clk *qseecom_bus_clk;
+static uint32_t qsee_perf_client;
+
+struct qseecom_registered_listener_list {
+	struct list_head                 list;
+	struct qseecom_register_listener_req svc;
+	u8  *sb_reg_req;
+	u8 *sb_virt;
+	s32 sb_phys;
+	size_t sb_length;
+	struct ion_handle *ihandle; /* Retrieve phy addr */
+
+	wait_queue_head_t          rcv_req_wq;
+	int                        rcv_req_flag;
+};
+
+struct qseecom_registered_app_list {
+	struct list_head                 list;
+	u32  app_id;
+	u32  ref_cnt;
+};
+
+struct qseecom_control {
+	struct ion_client *ion_clnt;		/* Ion client */
+	struct list_head  registered_listener_list_head;
+	spinlock_t        registered_listener_list_lock;
+
+	struct list_head  registered_app_list_head;
+	spinlock_t        registered_app_list_lock;
+
+	wait_queue_head_t send_resp_wq;
+	int               send_resp_flag;
+
+	uint32_t          qseos_version;
+};
+
+struct qseecom_client_handle {
+	u32  app_id;
+	u8 *sb_virt;
+	s32 sb_phys;
+	uint32_t user_virt_sb_base;
+	size_t sb_length;
+	struct ion_handle *ihandle;		/* Retrieve phy addr */
+};
+
+struct qseecom_listener_handle {
+	u32               id;
+};
+
+static struct qseecom_control qseecom;
+
+struct qseecom_dev_handle {
+	bool               service;
+	union {
+		struct qseecom_client_handle client;
+		struct qseecom_listener_handle listener;
+	};
+	bool released;
+	int               abort;
+	wait_queue_head_t abort_wq;
+	atomic_t          ioctl_count;
+};
+
+/* Function proto types */
+static int qsee_vote_for_clock(int32_t);
+static void qsee_disable_clock_vote(int32_t);
+
+static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
+		struct qseecom_register_listener_req *svc)
+{
+	struct qseecom_registered_listener_list *ptr;
+	int unique = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
+		if (ptr->svc.listener_id == svc->listener_id) {
+			pr_err("Service id: %u is already registered\n",
+					ptr->svc.listener_id);
+			unique = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+	return unique;
+}
+
+static struct qseecom_registered_listener_list *__qseecom_find_svc(
+						int32_t listener_id)
+{
+	struct qseecom_registered_listener_list *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(entry, &qseecom.registered_listener_list_head, list)
+	{
+		if (entry->svc.listener_id == listener_id)
+			break;
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+	return entry;
+}
+
+static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
+				struct qseecom_dev_handle *handle,
+				struct qseecom_register_listener_req *listener)
+{
+	int ret = 0;
+	unsigned int flags = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_command_scm_resp resp;
+	ion_phys_addr_t pa;
+
+	/* Get the handle of the shared fd */
+	svc->ihandle = ion_import_fd(qseecom.ion_clnt, listener->ifd_data_fd);
+	if (svc->ihandle == NULL) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
+
+	/* Populate the structure for sending scm call to load image */
+	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							svc->ihandle, flags);
+	svc->sb_phys = pa;
+
+	if (qseecom.qseos_version == QSEOS_VERSION_14) {
+		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req.listener_id = svc->svc.listener_id;
+		req.sb_len = svc->sb_length;
+		req.sb_ptr = (void *)svc->sb_phys;
+
+		resp.result = QSEOS_RESULT_INCOMPLETE;
+
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req,
+					sizeof(req), &resp, sizeof(resp));
+		if (ret) {
+			pr_err("qseecom_scm_call failed with err: %d\n", ret);
+			return -EINVAL;
+		}
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("Error SB registration req: resp.result = %d\n",
+					resp.result);
+			return -EPERM;
+		}
+	} else {
+		struct qseecom_command cmd;
+		struct qseecom_response resp;
+		struct qse_pr_init_sb_req_s sb_init_req;
+		struct qse_pr_init_sb_rsp_s sb_init_rsp;
+
+		svc->sb_reg_req = kzalloc((sizeof(sb_init_req) +
+					sizeof(sb_init_rsp)), GFP_KERNEL);
+
+		sb_init_req.pr_cmd = TZ_SCHED_CMD_ID_REGISTER_LISTENER;
+		sb_init_req.listener_id = svc->svc.listener_id;
+		sb_init_req.sb_len = svc->sb_length;
+		sb_init_req.sb_ptr = svc->sb_phys;
+
+		memcpy(svc->sb_reg_req, &sb_init_req, sizeof(sb_init_req));
+
+		/* It will always be a new cmd from this method */
+		cmd.cmd_type = TZ_SCHED_CMD_NEW;
+		cmd.sb_in_cmd_addr = (u8 *)(virt_to_phys(svc->sb_reg_req));
+		cmd.sb_in_cmd_len = sizeof(sb_init_req);
+
+		resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
+
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &cmd, sizeof(cmd)
+				, &resp, sizeof(resp));
+
+		if (ret) {
+			pr_err("qseecom_scm_call failed with err: %d\n", ret);
+			return -EINVAL;
+		}
+
+		if (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
+			pr_err("SB registration failed, resp.cmd_status %d\n",
+							resp.cmd_status);
+			return -EINVAL;
+		}
+		memset(svc->sb_virt, 0, svc->sb_length);
+	}
+	return 0;
+}
+
+static int qseecom_register_listener(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct qseecom_register_listener_req rcvd_lstnr;
+	struct qseecom_registered_listener_list *new_entry;
+
+	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	data->listener.id = 0;
+	data->service = true;
+	if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
+		pr_err("Service is not unique and is already registered\n");
+		data->released = true;
+		return -EBUSY;
+	}
+
+	new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry) {
+		pr_err("kmalloc failed\n");
+		return -ENOMEM;
+	}
+	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
+	new_entry->rcv_req_flag = 0;
+
+	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
+	new_entry->sb_length = rcvd_lstnr.sb_size;
+	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
+		pr_err("qseecom_set_sb_memory failed\n");
+		kzfree(new_entry);
+		return -ENOMEM;
+	}
+
+	data->listener.id = rcvd_lstnr.listener_id;
+	init_waitqueue_head(&new_entry->rcv_req_wq);
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	return ret;
+}
+
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	unsigned long flags;
+	uint32_t unmap_mem = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct qseecom_command_scm_resp resp;
+	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */
+
+	if (qseecom.qseos_version == QSEOS_VERSION_14) {
+		req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
+		req.listener_id = data->listener.id;
+		resp.result = QSEOS_RESULT_INCOMPLETE;
+
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req,
+					sizeof(req), &resp, sizeof(resp));
+		if (ret) {
+			pr_err("qseecom_scm_call failed with err: %d\n", ret);
+			return ret;
+		}
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("SB deregistration: result=%d\n", resp.result);
+			return -EPERM;
+		}
+	} else {
+		struct qse_pr_init_sb_req_s sb_init_req;
+		struct qseecom_command cmd;
+		struct qseecom_response resp;
+		struct qseecom_registered_listener_list *svc;
+
+		svc = __qseecom_find_svc(data->listener.id);
+		sb_init_req.pr_cmd = TZ_SCHED_CMD_ID_REGISTER_LISTENER;
+		sb_init_req.listener_id = data->listener.id;
+		sb_init_req.sb_len = 0;
+		sb_init_req.sb_ptr = 0;
+
+		memcpy(svc->sb_reg_req, &sb_init_req, sizeof(sb_init_req));
+
+		/* It will always be a new cmd from this method */
+		cmd.cmd_type = TZ_SCHED_CMD_NEW;
+		cmd.sb_in_cmd_addr = (u8 *)(virt_to_phys(svc->sb_reg_req));
+		cmd.sb_in_cmd_len = sizeof(sb_init_req);
+		resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
+
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1, &cmd, sizeof(cmd),
+					&resp, sizeof(resp));
+		if (ret) {
+			pr_err("qseecom_scm_call failed with err: %d\n", ret);
+			return ret;
+		}
+		kzfree(svc->sb_reg_req);
+		if (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
+			pr_err("Error with SB initialization\n");
+			return -EPERM;
+		}
+	}
+	data->abort = 1;
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
+			list) {
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			wake_up_all(&ptr_svc->rcv_req_wq);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+				atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc,
+			&qseecom.registered_listener_list_head,
+			list)
+	{
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			if (ptr_svc->sb_virt) {
+				unmap_mem = 1;
+				ihandle = ptr_svc->ihandle;
+			}
+			list_del(&ptr_svc->list);
+			kzfree(ptr_svc);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	/* Unmap the memory */
+	if (unmap_mem) {
+		if (!IS_ERR_OR_NULL(ihandle)) {
+			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+			ion_free(qseecom.ion_clnt, ihandle);
+		}
+	}
+	data->released = true;
+	return ret;
+}
+
+static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	ion_phys_addr_t pa;
+	int32_t ret;
+	unsigned int flags = 0;
+	struct qseecom_set_sb_mem_param_req req;
+	uint32_t len;
+
+	/* Copy the relevant information needed for loading the image */
+	if (__copy_from_user(&req, (void __user *)argp, sizeof(req)))
+		return -EFAULT;
+
+	/* Get the handle of the shared fd */
+	data->client.ihandle = ion_import_fd(qseecom.ion_clnt, req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(data->client.ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							data->client.ihandle,
+							flags);
+	data->client.sb_phys = pa;
+	data->client.sb_length = req.sb_len;
+	data->client.user_virt_sb_base = req.virt_sb_base;
+	return 0;
+}
+
+
+static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
+{
+	int ret;
+	ret = (qseecom.send_resp_flag != 0);
+	return ret || data->abort;
+}
+
+static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	uint32_t lstnr;
+	unsigned long flags;
+	struct qseecom_client_listener_data_irsp send_data_rsp;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+
+
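+	/*
+	 * Every QSEOS_RESULT_INCOMPLETE response names a listener in
+	 * resp->data that has to service the request: wake that listener's
+	 * receive queue, block until user space posts its response, send
+	 * QSEOS_LISTENER_DATA_RSP_COMMAND back to the secure side and look
+	 * at the fresh response.  The loop exits once TZ stops returning
+	 * INCOMPLETE.
+	 */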
+	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_warning("Requested service does not exist\n");
+			return -ERESTARTSYS;
+		}
+		pr_debug("waking up rcv_req_wq and "
+				"waiting for send_resp_wq\n");
+		if (wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_listener_has_sent_rsp(data))) {
+			pr_warning("Interrupted: exiting send_cmd loop\n");
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting listener service %d\n",
+				data->listener.id);
+			return -ENODEV;
+		}
+		qseecom.send_resp_flag = 0;
+		send_data_rsp.qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		send_data_rsp.listener_id = lstnr;
+
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,
+					(const void *)&send_data_rsp,
+					sizeof(send_data_rsp), resp,
+					sizeof(*resp));
+		if (ret) {
+			pr_err("qseecom_scm_call failed with err: %d\n", ret);
+			return ret;
+		}
+		if (resp->result == QSEOS_RESULT_FAILURE) {
+			pr_err("Response result %d not supported\n",
+							resp->result);
+			return -EINVAL;
+		}
+	}
+	return ret;
+}
+
+static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
+{
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	u32 app_id = 0;
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int32_t ret;
+	ion_phys_addr_t pa = 0;
+	uint32_t len;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_check_app_ireq req;
+	/* Copy the relevant information needed for loading the image */
+	if (__copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+	/* Vote for the SFPB clock */
+	ret = qsee_vote_for_clock(CLK_SFPB);
+	if (ret)
+		pr_warning("Unable to vote for SFPB clock\n");
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	memcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
+
+	/*  SCM_CALL  to check if app_id for the mentioned app exists */
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req,
+				sizeof(struct qseecom_check_app_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to check if app is already loaded failed\n");
+		return -EINVAL;
+	}
+
+	if (resp.result == QSEOS_RESULT_FAILURE)
+		app_id = 0;
+	else
+		app_id = resp.data;
+
+	if (app_id) {
+		pr_warn("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+	} else {
+		struct qseecom_load_app_ireq load_req;
+
+		pr_warn("App (%s) does not exist, loading apps for first time\n",
+			(char *)(req.app_name));
+		/* Get the handle of the shared fd */
+		ihandle = ion_import_fd(qseecom.ion_clnt,
+					load_img_req.ifd_data_fd);
+		if (IS_ERR_OR_NULL(ihandle)) {
+			pr_err("Ion client could not retrieve the handle\n");
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -ENOMEM;
+		}
+
+		/* Get the physical address of the ION BUF */
+		ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+		if (ret) {
+			pr_err("ion_phys failed: %d\n", ret);
+			ion_free(qseecom.ion_clnt, ihandle);
+			qsee_disable_clock_vote(CLK_SFPB);
+			return ret;
+		}
+
+		/* Populate the structure for sending scm call to load image */
+		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req.mdt_len = load_img_req.mdt_len;
+		load_req.img_len = load_img_req.img_len;
+		load_req.phy_addr = pa;
+
+		/*  SCM_CALL  to load the app and get the app_id back */
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &load_req,
+				sizeof(struct qseecom_load_app_ireq),
+				&resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to load app failed\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -EINVAL;
+		}
+
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -EFAULT;
+		}
+
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				if (!IS_ERR_OR_NULL(ihandle))
+					ion_free(qseecom.ion_clnt, ihandle);
+				qsee_disable_clock_vote(CLK_SFPB);
+				return ret;
+			}
+		}
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call failed resp.result unknown, %d\n",
+					resp.result);
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -EFAULT;
+		}
+
+		app_id = resp.data;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			pr_err("kmalloc failed\n");
+			qsee_disable_clock_vote(CLK_SFPB);
+			return -ENOMEM;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+					flags);
+
+		pr_warn("App with id %d (%s) now loaded\n", app_id,
+			(char *)(req.app_name));
+	}
+	data->client.app_id = app_id;
+	load_img_req.app_id = app_id;
+	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
+		pr_err("copy_to_user failed\n");
+		kzfree(entry);
+		qsee_disable_clock_vote(CLK_SFPB);
+		return -EFAULT;
+	}
+	qsee_disable_clock_vote(CLK_SFPB);
+	return 0;
+}
+
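+/*
+ * Wake any listener waiting on this client and block until all other
+ * outstanding ioctls on this handle have drained, so the app can be
+ * unloaded safely.  Returns 1 to indicate the app should be unloaded.
+ */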
+static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
+{
+	wake_up_all(&qseecom.send_resp_wq);
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+					atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			return -ERESTARTSYS;
+		}
+	}
+	/* Set unload app */
+	return 1;
+}
+
+static int qseecom_unload_app(struct qseecom_dev_handle *data)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_registered_app_list *ptr_app;
+	uint32_t unload = 0;
+
+	if (qseecom.qseos_version == QSEOS_VERSION_14) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+								list) {
+			if (ptr_app->app_id == data->client.app_id) {
+				if (ptr_app->ref_cnt == 1) {
+					unload = __qseecom_cleanup_app(data);
+					list_del(&ptr_app->list);
+					kzfree(ptr_app);
+					break;
+				} else {
+					ptr_app->ref_cnt--;
+					data->released = true;
+					pr_warn("Can't unload app with id %d (it is in use)\n",
+							ptr_app->app_id);
+					break;
+				}
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags);
+	}
+	if (!IS_ERR_OR_NULL(data->client.ihandle)) {
+		ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
+		ion_free(qseecom.ion_clnt, data->client.ihandle);
+	}
+
+	if ((unload) && (qseecom.qseos_version == QSEOS_VERSION_14)) {
+		struct qseecom_unload_app_ireq req;
+
+		/* Populate the structure for sending scm call to load image */
+		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+		req.app_id = data->client.app_id;
+
+		/* SCM_CALL to unload the app */
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req,
+				sizeof(struct qseecom_unload_app_ireq),
+				&resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to unload app (id = %d) failed\n",
+							req.app_id);
+			return -EFAULT;
+		} else {
+			pr_warn("App id %d now unloaded\n", req.app_id);
+		}
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd fail err: %d\n",
+						ret);
+				return ret;
+			}
+		}
+	}
+
+	if (qseecom.qseos_version == QSEOS_VERSION_13) {
+		data->abort = 1;
+		wake_up_all(&qseecom.send_resp_wq);
+		while (atomic_read(&data->ioctl_count) > 0) {
+			if (wait_event_freezable(data->abort_wq,
+					atomic_read(&data->ioctl_count) <= 0)) {
+				pr_err("Interrupted from abort\n");
+				ret = -ERESTARTSYS;
+				break;
+			}
+		}
+	}
+	data->released = true;
+	return ret;
+}
+
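+/*
+ * Translate a user-space virtual address within the client's registered
+ * shared buffer into the corresponding physical address, using the
+ * sb_phys and user_virt_sb_base values recorded when the shared buffer
+ * was set up.
+ */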
+static uint32_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
+						uint32_t virt)
+{
+	return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
+}
+
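+/*
+ * Legacy (QSEOS 1.3) command path: request and response live in the
+ * client's shared buffer.  A TZ_SCHED_CMD_NEW command is issued via
+ * scm_call(); while the status remains TZ_SCHED_STATUS_INCOMPLETE, every
+ * registered listener is woken (we cannot tell which one owns the
+ * callback) and the command is then resumed with TZ_SCHED_CMD_PENDING.
+ */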
+static int __qseecom_send_cmd_legacy(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 reqd_len_sb_in = 0;
+	struct qseecom_command cmd;
+	struct qseecom_response resp;
+
+
+	if (req->cmd_req_buf == NULL || req->resp_buf == NULL) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+
+	if (req->cmd_req_len <= 0 ||
+		req->resp_len <= 0 ||
+		req->cmd_req_len > data->client.sb_length ||
+		req->resp_len > data->client.sb_length) {
+		pr_err("cmd buffer length or "
+				"response buffer length not valid\n");
+		return -EINVAL;
+	}
+
+	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+	if (reqd_len_sb_in > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and "
+			"resp_buf. Required: %u, Available: %u\n",
+				reqd_len_sb_in, data->client.sb_length);
+		return -ENOMEM;
+	}
+	cmd.cmd_type = TZ_SCHED_CMD_NEW;
+	cmd.sb_in_cmd_addr = (u8 *) data->client.sb_phys;
+	cmd.sb_in_cmd_len = req->cmd_req_len;
+
+	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
+	resp.sb_in_rsp_addr = (u8 *)data->client.sb_phys + req->cmd_req_len;
+	resp.sb_in_rsp_len = req->resp_len;
+
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *)&cmd,
+					sizeof(cmd), &resp, sizeof(resp));
+
+	if (ret) {
+		pr_err("qseecom_scm_call_legacy failed with err: %d\n", ret);
+		return ret;
+	}
+
+	while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
+		/*
+		 * If cmd is incomplete, get the callback cmd out from SB out
+		 * and put it on the list
+		 */
+		struct qseecom_registered_listener_list *ptr_svc = NULL;
+		/*
+		 * We don't know which service can handle the command. so we
+		 * wake up all blocking services and let them figure out if
+		 * they can handle the given command.
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+
+		pr_debug("waking up rcv_req_wq and "
+				"waiting for send_resp_wq\n");
+		if (wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_listener_has_sent_rsp(data))) {
+			pr_warning("qseecom Interrupted: exiting send_cmd loop\n");
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting driver\n");
+			return -ENODEV;
+		}
+		qseecom.send_resp_flag = 0;
+		cmd.cmd_type = TZ_SCHED_CMD_PENDING;
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *)&cmd,
+					sizeof(cmd), &resp, sizeof(resp));
+		if (ret) {
+			pr_err("qseecom_scm_call failed with err: %d\n", ret);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	int ret = 0;
+	u32 reqd_len_sb_in = 0;
+	struct qseecom_client_send_data_ireq send_data_req;
+	struct qseecom_command_scm_resp resp;
+
+	if (req->cmd_req_buf == NULL || req->resp_buf == NULL) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+
+	if (req->cmd_req_len <= 0 ||
+		req->resp_len <= 0 ||
+		req->cmd_req_len > data->client.sb_length ||
+		req->resp_len > data->client.sb_length) {
+		pr_err("cmd buffer length or "
+				"response buffer length not valid\n");
+		return -EINVAL;
+	}
+
+	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+	if (reqd_len_sb_in > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and "
+			"resp_buf. Required: %u, Available: %u\n",
+				reqd_len_sb_in, data->client.sb_length);
+		return -ENOMEM;
+	}
+
+	send_data_req.qsee_cmd_id = QSEOS_CLIENT_SEND_DATA_COMMAND;
+	send_data_req.app_id = data->client.app_id;
+	send_data_req.req_ptr = (void *)(__qseecom_uvirt_to_kphys(data,
+					(uint32_t)req->cmd_req_buf));
+	send_data_req.req_len = req->cmd_req_len;
+	send_data_req.rsp_ptr = (void *)(__qseecom_uvirt_to_kphys(data,
+					(uint32_t)req->resp_buf));
+	send_data_req.rsp_len = req->resp_len;
+
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1, (const void *) &send_data_req,
+					sizeof(send_data_req),
+					&resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		return ret;
+	}
+
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd failed err: %d\n", ret);
+			return ret;
+		}
+	} else {
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("Response result %d not supported\n",
+							resp.result);
+			ret = -EINVAL;
+		}
+	}
+	return ret;
+}
+
+
+static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+	if (qseecom.qseos_version == QSEOS_VERSION_14)
+		ret = __qseecom_send_cmd(data, &req);
+	else
+		ret = __qseecom_send_cmd_legacy(data, &req);
+	if (ret)
+		return ret;
+
+	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%p\n",
+			req.resp_len, req.resp_buf);
+	return ret;
+}
+
+static int __qseecom_send_cmd_req_clean_up(
+			struct qseecom_send_modfd_cmd_req *req)
+{
+	char *field;
+	uint32_t *update;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd > 0) {
+			field = (char *)req->cmd_req_buf +
+					req->ifd_data[i].cmd_buf_offset;
+			update = (uint32_t *) field;
+			*update = 0;
+		}
+	}
+	return ret;
+}
+
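+/*
+ * For each ion fd passed in the modfd request, look up its physical
+ * address and patch it into the command buffer at the caller-supplied
+ * offset, so the secure side can address the shared memory directly.
+ */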
+static int __qseecom_update_with_phy_addr(
+			struct qseecom_send_modfd_cmd_req *req)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	uint32_t *update;
+	ion_phys_addr_t pa;
+	int ret = 0;
+	int i = 0;
+	uint32_t length;
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd > 0) {
+			/* Get the handle of the shared fd */
+			ihandle = ion_import_fd(qseecom.ion_clnt,
+						req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_req_buf +
+						req->ifd_data[i].cmd_buf_offset;
+			update = (uint32_t *) field;
+
+			/* Populate the cmd data structure with the phys_addr */
+			ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &length);
+			if (ret)
+				return -ENOMEM;
+
+			*update = (uint32_t)pa;
+			/* Deallocate the handle */
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+		}
+	}
+	return ret;
+}
+
+static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_send_modfd_cmd_req req;
+	struct qseecom_send_cmd_req send_cmd_req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+	send_cmd_req.cmd_req_buf = req.cmd_req_buf;
+	send_cmd_req.cmd_req_len = req.cmd_req_len;
+	send_cmd_req.resp_buf = req.resp_buf;
+	send_cmd_req.resp_len = req.resp_len;
+
+	ret = __qseecom_update_with_phy_addr(&req);
+	if (ret)
+		return ret;
+	if (qseecom.qseos_version == QSEOS_VERSION_14)
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+	else
+		ret = __qseecom_send_cmd_legacy(data, &send_cmd_req);
+	__qseecom_send_cmd_req_clean_up(&req);
+
+	if (ret)
+		return ret;
+
+	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%p\n",
+			req.resp_len, req.resp_buf);
+	return ret;
+}
+
+static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
+		struct qseecom_registered_listener_list *svc)
+{
+	int ret;
+	ret = (svc->rcv_req_flag != 0);
+	return ret || data->abort;
+}
+
+static int qseecom_receive_req(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_registered_listener_list *this_lstnr;
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	while (1) {
+		if (wait_event_freezable(this_lstnr->rcv_req_wq,
+				__qseecom_listener_has_rcvd_req(data,
+				this_lstnr))) {
+			pr_warning("Interrupted: exiting wait_rcv_req loop\n");
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting driver!\n");
+			return -ENODEV;
+		}
+		this_lstnr->rcv_req_flag = 0;
+		if (qseecom.qseos_version == QSEOS_VERSION_13) {
+			if (*((uint32_t *)this_lstnr->sb_virt) != 0)
+				break;
+		} else {
+			break;
+		}
+	}
+	return ret;
+}
+
+static int qseecom_send_resp(void)
+{
+	qseecom.send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	struct qseecom_qseos_version_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EINVAL;
+	}
+	req.qseos_version = qseecom.qseos_version;
+	if (copy_to_user(argp, &req, sizeof(req))) {
+		pr_err("copy_to_user failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
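+/*
+ * Reference-counted bandwidth voting: the first vote for DFAB or SFPB
+ * bumps the msm_bus_scale request to the corresponding use case (1 or 2),
+ * and qsee_disable_clock_vote() drops it back to 0 once the last user
+ * has gone.
+ */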
+static int qsee_vote_for_clock(int32_t clk_type)
+{
+	int ret = 0;
+
+	if (!qsee_perf_client)
+		return ret;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		/* Check if the clk is valid */
+		if (IS_ERR_OR_NULL(qseecom_bus_clk)) {
+			pr_warn("qseecom bus clock is null or error");
+			return -EINVAL;
+		}
+		mutex_lock(&qsee_bw_mutex);
+		if (!qsee_bw_count) {
+			ret = msm_bus_scale_client_update_request(
+					qsee_perf_client, 1);
+			if (ret)
+				pr_err("DFAB Bandwidth req failed (%d)\n",
+								ret);
+			else
+				qsee_bw_count++;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_sfpb_bw_mutex);
+		if (!qsee_sfpb_bw_count) {
+			ret = msm_bus_scale_client_update_request(
+					qsee_perf_client, 2);
+			if (ret)
+				pr_err("SFPB Bandwidth req failed (%d)\n",
+								ret);
+			else
+				qsee_sfpb_bw_count++;
+		}
+		mutex_unlock(&qsee_sfpb_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+	return ret;
+}
+
+static void qsee_disable_clock_vote(int32_t clk_type)
+{
+	int32_t ret = 0;
+
+	if (!qsee_perf_client)
+		return;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		/* Check if the DFAB clk is valid */
+		if (IS_ERR_OR_NULL(qseecom_bus_clk)) {
+			pr_warn("qseecom bus clock is null or error");
+			return;
+		}
+		mutex_lock(&qsee_bw_mutex);
+		if (qsee_bw_count > 0) {
+			if (qsee_bw_count-- == 1) {
+				ret = msm_bus_scale_client_update_request(
+						qsee_perf_client, 0);
+				if (ret)
+					pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			}
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_sfpb_bw_mutex);
+		if (qsee_sfpb_bw_count > 0) {
+			if (qsee_sfpb_bw_count-- == 1) {
+				ret = msm_bus_scale_client_update_request(
+						qsee_perf_client, 0);
+				if (ret)
+					pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			}
+		}
+		mutex_unlock(&qsee_sfpb_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+
+}
+
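+/*
+ * Load an external (non trustzone-app) ELF image into the secure
+ * environment.  The image is passed in through an ion buffer and the
+ * SCM call is pinned to core 0, as this command requires.
+ */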
+static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int ret;
+	int set_cpu_ret = 0;
+	ion_phys_addr_t pa = 0;
+	uint32_t len;
+	struct cpumask mask;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_command_scm_resp resp;
+
+	/* Copy the relevant information needed for loading the image */
+	if (__copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Get the handle of the shared fd */
+	ihandle = ion_import_fd(qseecom.ion_clnt,
+				load_img_req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+	if (ret) {
+		pr_err("ion_phys failed: %d\n", ret);
+		ion_free(qseecom.ion_clnt, ihandle);
+		return ret;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+	load_req.mdt_len = load_img_req.mdt_len;
+	load_req.img_len = load_img_req.img_len;
+	load_req.phy_addr = pa;
+
+	/* SCM_CALL tied to Core0 */
+	mask = CPU_MASK_CPU0;
+	set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
+	if (set_cpu_ret) {
+		pr_err("set_cpus_allowed_ptr failed : ret %d\n",
+				set_cpu_ret);
+		ret = -EFAULT;
+		goto qseecom_load_external_elf_set_cpu_err;
+	}
+
+	/*  SCM_CALL to load the external elf */
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &load_req,
+			sizeof(struct qseecom_load_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto qseecom_load_external_elf_scm_err;
+	}
+
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+	} else {
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call to load image failed resp.result =%d\n",
+						resp.result);
+			ret = -EFAULT;
+		}
+	}
+
+qseecom_load_external_elf_scm_err:
+	/* Restore the CPU mask */
+	mask = CPU_MASK_ALL;
+	set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
+	if (set_cpu_ret) {
+		pr_err("set_cpus_allowed_ptr failed to restore mask: ret %d\n",
+				set_cpu_ret);
+		ret = -EFAULT;
+	}
+
+qseecom_load_external_elf_set_cpu_err:
+	/* Deallocate the handle */
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+
+	return ret;
+}
+
+static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	int set_cpu_ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_unload_app_ireq req;
+	struct cpumask mask;
+
+	/* Populate the structure for sending scm call to unload image */
+	req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+	/* SCM_CALL tied to Core0 */
+	mask = CPU_MASK_CPU0;
+	ret = set_cpus_allowed_ptr(current, &mask);
+	if (ret) {
+		pr_err("set_cpus_allowed_ptr failed : ret %d\n",
+				ret);
+		return -EFAULT;
+	}
+
+	/* SCM_CALL to unload the external elf */
+	ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto qseecom_unload_external_elf_scm_err;
+	}
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd fail err: %d\n",
+					ret);
+	} else {
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call to unload image failed resp.result =%d\n",
+						resp.result);
+			ret = -EFAULT;
+		}
+	}
+
+qseecom_unload_external_elf_scm_err:
+	/* Restore the CPU mask */
+	mask = CPU_MASK_ALL;
+	set_cpu_ret = set_cpus_allowed_ptr(current, &mask);
+	if (set_cpu_ret) {
+		pr_err("set_cpus_allowed_ptr failed to restore mask: ret %d\n",
+				set_cpu_ret);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+static long qseecom_ioctl(struct file *file, unsigned cmd,
+		unsigned long arg)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data = file->private_data;
+	void __user *argp = (void __user *) arg;
+
+	if (data->abort) {
+		pr_err("Aborting qseecom driver\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		pr_debug("ioctl register_listener_req()\n");
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_register_listener(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_register_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+		pr_debug("ioctl unregister_listener_req()\n");
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unregister_listener(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_unregister_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&send_msg_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&send_msg_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: {
+		/* Only one client allowed here at a time */
+		mutex_lock(&send_msg_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_modfd_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&send_msg_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_RECEIVE_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_receive_req(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_receive_req: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_RESP_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_resp();
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_send_resp: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		ret = qseecom_set_client_mem_param(data, argp);
+		if (ret)
+			pr_err("failed qseecom_set_mem_param request: %d\n",
+								ret);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_APP_REQ: {
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_app(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_app(data);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_qseos_version(data, argp);
+		if (ret)
+			pr_err("qseecom_get_qseos_version: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
+		atomic_inc(&data->ioctl_count);
+		ret = qsee_vote_for_clock(CLK_DFAB);
+		if (ret)
+			pr_err("Failed to vote for DFAB clock%d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
+		atomic_inc(&data->ioctl_count);
+		qsee_disable_clock_vote(CLK_DFAB);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		data->released = true;
+		if (qseecom.qseos_version == QSEOS_VERSION_13) {
+			pr_err("Loading External elf image unsupported in rev 0x13\n");
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_external_elf(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_external_elf request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		data->released = true;
+		if (qseecom.qseos_version == QSEOS_VERSION_13) {
+			pr_err("Unloading External elf image unsupported in rev 0x13\n");
+			ret = -EINVAL;
+			break;
+		}
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_external_elf(data);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int qseecom_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		pr_err("kzalloc failed\n");
+		return -ENOMEM;
+	}
+	file->private_data = data;
+	data->abort = 0;
+	data->service = false;
+	data->released = false;
+	init_waitqueue_head(&data->abort_wq);
+	atomic_set(&data->ioctl_count, 0);
+	if (qseecom.qseos_version == QSEOS_VERSION_13) {
+		int pil_error;
+		mutex_lock(&pil_access_lock);
+		if (pil_ref_cnt == 0) {
+			pil = pil_get("tzapps");
+			if (IS_ERR(pil)) {
+				pr_err("Playready PIL image load failed\n");
+				pil_error = PTR_ERR(pil);
+				pil = NULL;
+				pr_debug("tzapps image load FAILED\n");
+				mutex_unlock(&pil_access_lock);
+				return pil_error;
+			}
+		}
+		pil_ref_cnt++;
+		mutex_unlock(&pil_access_lock);
+	}
+	return ret;
+}
+
+static int qseecom_release(struct inode *inode, struct file *file)
+{
+	struct qseecom_dev_handle *data = file->private_data;
+	int ret = 0;
+
+	if (data->released == false) {
+		pr_warn("data->released == false\n");
+		if (data->service)
+			ret = qseecom_unregister_listener(data);
+		else
+			ret = qseecom_unload_app(data);
+		if (ret) {
+			pr_err("Close failed\n");
+			return ret;
+		}
+	}
+	if (qseecom.qseos_version == QSEOS_VERSION_13) {
+		mutex_lock(&pil_access_lock);
+		if (pil_ref_cnt == 1)
+			pil_put(pil);
+		pil_ref_cnt--;
+		mutex_unlock(&pil_access_lock);
+	}
+	kfree(data);
+	qsee_disable_clock_vote(CLK_DFAB);
+
+	return ret;
+}
+
+static const struct file_operations qseecom_fops = {
+		.owner = THIS_MODULE,
+		.unlocked_ioctl = qseecom_ioctl,
+		.open = qseecom_open,
+		.release = qseecom_release
+};
+
+static int __devinit qseecom_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct device *class_dev;
+	char qsee_not_legacy = 0;
+	struct msm_bus_scale_pdata *qseecom_platform_support;
+	uint32_t system_call_id = QSEOS_CHECK_VERSION_CMD;
+
+	qsee_bw_count = 0;
+	qseecom_bus_clk = NULL;
+	qsee_perf_client = 0;
+
+	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto unregister_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
+			QSEECOM_DEV);
+	if (IS_ERR(class_dev)) {
+		rc = PTR_ERR(class_dev);
+		pr_err("device_create failed %d\n", rc);
+		goto class_destroy;
+	}
+
+	cdev_init(&qseecom_cdev, &qseecom_fops);
+	qseecom_cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&qseecom_cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto err;
+	}
+
+	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
+	spin_lock_init(&qseecom.registered_listener_list_lock);
+	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
+	spin_lock_init(&qseecom.registered_app_list_lock);
+	init_waitqueue_head(&qseecom.send_resp_wq);
+	qseecom.send_resp_flag = 0;
+
+	rc = scm_call(6, 1, &system_call_id, sizeof(system_call_id),
+				&qsee_not_legacy, sizeof(qsee_not_legacy));
+	if (rc) {
+		pr_err("Failed to retrieve QSEE version information %d\n", rc);
+		goto err;
+	}
+	if (qsee_not_legacy)
+		qseecom.qseos_version = QSEOS_VERSION_14;
+	else {
+		qseecom.qseos_version = QSEOS_VERSION_13;
+		pil = NULL;
+		pil_ref_cnt = 0;
+	}
+	/* Create ION msm client */
+	qseecom.ion_clnt = msm_ion_client_create(0x03, "qseecom-kernel");
+	if (qseecom.ion_clnt == NULL) {
+		pr_err("Ion client cannot be created\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	/* register client for bus scaling */
+	qseecom_platform_support = (struct msm_bus_scale_pdata *)
+					pdev->dev.platform_data;
+	qsee_perf_client = msm_bus_scale_register_client(
+					qseecom_platform_support);
+	if (!qsee_perf_client) {
+		pr_err("Unable to register bus client\n");
+	} else {
+		qseecom_bus_clk = clk_get(class_dev, "bus_clk");
+		if (IS_ERR(qseecom_bus_clk)) {
+			qseecom_bus_clk = NULL;
+		} else if (qseecom_bus_clk != NULL) {
+			pr_debug("Enabled DFAB clock");
+			clk_set_rate(qseecom_bus_clk, 64000000);
+		}
+	}
+	return 0;
+
+err:
+	device_destroy(driver_class, qseecom_device_no);
+class_destroy:
+	class_destroy(driver_class);
+unregister_chrdev_region:
+	unregister_chrdev_region(qseecom_device_no, 1);
+	return rc;
+}
+
+static int qseecom_remove(struct platform_device *pdev)
+{
+	if (pdev->dev.platform_data != NULL)
+		msm_bus_scale_unregister_client(qsee_perf_client);
+	return 0;
+}
+
+static struct platform_driver qseecom_plat_driver = {
+	.probe = qseecom_probe,
+	.remove = qseecom_remove,
+	.driver = {
+		.name = "qseecom",
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init qseecom_init(void)
+{
+	return platform_driver_register(&qseecom_plat_driver);
+}
+
+static void __exit qseecom_exit(void)
+{
+	clk_put(qseecom_bus_clk);
+
+	device_destroy(driver_class, qseecom_device_no);
+	class_destroy(driver_class);
+	unregister_chrdev_region(qseecom_device_no, 1);
+	ion_client_destroy(qseecom.ion_clnt);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Secure Execution Environment Communicator");
+
+module_init(qseecom_init);
+module_exit(qseecom_exit);
diff --git a/drivers/misc/qseecom_legacy.h b/drivers/misc/qseecom_legacy.h
new file mode 100644
index 0000000..66f87e9
--- /dev/null
+++ b/drivers/misc/qseecom_legacy.h
@@ -0,0 +1,79 @@
+/* Qualcomm Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOM_LEGACY_H_
+#define __QSEECOM_LEGACY_H_
+
+#include <linux/types.h>
+
+#define TZ_SCHED_CMD_ID_REGISTER_LISTENER    0x04
+
+enum tz_sched_cmd_type {
+	TZ_SCHED_CMD_INVALID = 0,
+	TZ_SCHED_CMD_NEW,      /* New TZ Scheduler Command */
+	TZ_SCHED_CMD_PENDING,  /* Pending cmd...sched will restore stack */
+	TZ_SCHED_CMD_COMPLETE, /* TZ sched command is complete */
+	TZ_SCHED_CMD_MAX     = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_status {
+	TZ_SCHED_STATUS_INCOMPLETE = 0,
+	TZ_SCHED_STATUS_COMPLETE,
+	TZ_SCHED_STATUS_MAX  = 0x7FFFFFFF
+};
+/* Command structure for initializing shared buffers */
+__packed struct qse_pr_init_sb_req_s {
+	/* First 4 bytes should always be command id */
+	uint32_t                  pr_cmd;
+	/* Pointer to the physical location of sb buffer */
+	uint32_t                  sb_ptr;
+	/* length of shared buffer */
+	uint32_t                  sb_len;
+	uint32_t                  listener_id;
+};
+
+__packed struct qse_pr_init_sb_rsp_s {
+	/* First 4 bytes should always be command id */
+	uint32_t                  pr_cmd;
+	/* Return code: 0 for success, appropriate error code otherwise */
+	int32_t                   ret;
+};
+
+/*
+ * struct qseecom_command - QSEECom command buffer
+ * @cmd_type: value from enum tz_sched_cmd_type
+ * @sb_in_cmd_addr: points to physical location of command
+ *                buffer
+ * @sb_in_cmd_len: length of command buffer
+ */
+__packed struct qseecom_command {
+	uint32_t               cmd_type;
+	uint8_t                *sb_in_cmd_addr;
+	uint32_t               sb_in_cmd_len;
+};
+
+/*
+ * struct qseecom_response - QSEECom response buffer
+ * @cmd_status: value from enum tz_sched_cmd_status
+ * @sb_in_rsp_addr: points to physical location of response
+ *                buffer
+ * @sb_in_rsp_len: length of command response
+ */
+__packed struct qseecom_response {
+	uint32_t                 cmd_status;
+	uint8_t                  *sb_in_rsp_addr;
+	uint32_t                 sb_in_rsp_len;
+};
+
+#endif /* __QSEECOM_LEGACY_H_ */
diff --git a/drivers/misc/smsc_hub.c b/drivers/misc/smsc_hub.c
new file mode 100644
index 0000000..bde25d9
--- /dev/null
+++ b/drivers/misc/smsc_hub.c
@@ -0,0 +1,372 @@
+/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smsc3503.h>
+#include <linux/module.h>
+#include <mach/msm_xo.h>
+
+#define SMSC3503_I2C_ADDR 0x08
+#define SMSC_GSBI_I2C_BUS_ID 10
+static const unsigned short normal_i2c[] = {
+	SMSC3503_I2C_ADDR, I2C_CLIENT_END };
+
+struct hsic_hub {
+	struct regulator *hsic_hub_reg;
+	struct device *dev;
+	struct i2c_client *client;
+	struct msm_xo_voter *xo_handle;
+};
+static struct hsic_hub *smsc_hub;
+
+/* APIs for setting/clearing bits and for reading/writing values */
+static inline int hsic_hub_get_u8(struct i2c_client *client, u8 reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		pr_err("%s:i2c_read8 failed\n", __func__);
+	return ret;
+}
+
+static inline int hsic_hub_get_u16(struct i2c_client *client, u8 reg)
+{
+	int ret;
+
+	ret = i2c_smbus_read_word_data(client, reg);
+	if (ret < 0)
+		pr_err("%s:i2c_read16 failed\n", __func__);
+	return ret;
+}
+
+static inline int hsic_hub_write_word_data(struct i2c_client *client, u8 reg,
+						u16 value)
+{
+	int ret;
+
+	ret = i2c_smbus_write_word_data(client, reg, value);
+	if (ret)
+		pr_err("%s:i2c_write16 failed\n", __func__);
+	return ret;
+}
+
+static inline int hsic_hub_write_byte_data(struct i2c_client *client, u8 reg,
+						u8 value)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(client, reg, value);
+	if (ret)
+		pr_err("%s:i2c_write_byte_data failed\n", __func__);
+	return ret;
+}
+
+static inline int hsic_hub_set_bits(struct i2c_client *client, u8 reg,
+					u8 value)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0) {
+		pr_err("%s:i2c_read_byte_data failed\n", __func__);
+		return ret;
+	}
+	return i2c_smbus_write_byte_data(client, reg, (ret | value));
+}
+
+static inline int hsic_hub_clear_bits(struct i2c_client *client, u8 reg,
+					u8 value)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0) {
+		pr_err("%s:i2c_read_byte_data failed\n", __func__);
+		return ret;
+	}
+	return i2c_smbus_write_byte_data(client, reg, (ret & ~value));
+}
+
+static int i2c_hsic_hub_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+				     I2C_FUNC_SMBUS_WORD_DATA))
+		return -EIO;
+
+	/* CONFIG_N bit in SP_ILOCK register has to be set before changing
+	 * other registers to change default configuration of hsic hub.
+	 */
+	hsic_hub_set_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
+
+	/* The default configuration (VID, PID, strings, etc.) can be
+	 * changed by writing new values to the hsic hub registers.
+	 */
+	hsic_hub_write_word_data(client, SMSC3503_VENDORID, 0x05C6);
+
+	/* CONFIG_N bit in SP_ILOCK register has to be cleared for new
+	 * values in registers to be effective after writing to
+	 * other registers.
+	 */
+	hsic_hub_clear_bits(client, SMSC3503_SP_ILOCK, CONFIG_N);
+
+	return 0;
+}
+
+static int i2c_hsic_hub_remove(struct i2c_client *client)
+{
+	return 0;
+}
+
+static const struct i2c_device_id hsic_hub_id[] = {
+	{"i2c_hsic_hub", 0},
+	{}
+};
+MODULE_DEVICE_TABLE(i2c, hsic_hub_id);
+
+static struct i2c_driver hsic_hub_driver = {
+	.driver = {
+		.name = "i2c_hsic_hub",
+	},
+	.probe    = i2c_hsic_hub_probe,
+	.remove   = i2c_hsic_hub_remove,
+	.id_table = hsic_hub_id,
+};
+
+#define HSIC_HUB_VDD_VOL_MIN	1650000 /* uV */
+#define HSIC_HUB_VDD_VOL_MAX	1950000 /* uV */
+#define HSIC_HUB_VDD_LOAD	36000	/* uA */
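+
+/*
+ * Probe sequence: bring up the hub's VDDIO rail, vote the TCXO D1 buffer
+ * on, pulse the reset GPIO, then register an I2C driver and instantiate
+ * the SMSC3503 device so its default configuration can be adjusted over
+ * SMBus.
+ */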
+static int __devinit smsc_hub_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	const struct smsc_hub_platform_data *pdata;
+	struct i2c_adapter *i2c_adap;
+	struct i2c_board_info i2c_info;
+
+	if (!pdev->dev.platform_data) {
+		dev_err(&pdev->dev, "No platform data\n");
+		return -ENODEV;
+	}
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata->hub_reset)
+		return -EINVAL;
+
+	smsc_hub = kzalloc(sizeof(*smsc_hub), GFP_KERNEL);
+	if (!smsc_hub)
+		return -ENOMEM;
+
+	smsc_hub->hsic_hub_reg = regulator_get(&pdev->dev, "EXT_HUB_VDDIO");
+	if (IS_ERR(smsc_hub->hsic_hub_reg)) {
+		dev_err(&pdev->dev, "unable to get ext hub vddcx\n");
+		ret = PTR_ERR(smsc_hub->hsic_hub_reg);
+		goto free_mem;
+	}
+
+	ret = gpio_request(pdata->hub_reset, "HSIC_HUB_RESET_GPIO");
+	if (ret < 0) {
+		dev_err(&pdev->dev, "gpio request failed for GPIO%d\n",
+							pdata->hub_reset);
+		goto gpio_req_fail;
+	}
+
+	ret = regulator_set_voltage(smsc_hub->hsic_hub_reg,
+			HSIC_HUB_VDD_VOL_MIN,
+			HSIC_HUB_VDD_VOL_MAX);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"unable to set the voltage for hsic hub reg\n");
+		goto reg_set_voltage_fail;
+	}
+
+	ret = regulator_set_optimum_mode(smsc_hub->hsic_hub_reg,
+				HSIC_HUB_VDD_LOAD);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Unable to set optimum mode of regulator:"
+							"VDDCX\n");
+		goto reg_optimum_mode_fail;
+	}
+
+	ret = regulator_enable(smsc_hub->hsic_hub_reg);
+	if (ret) {
+		dev_err(&pdev->dev, "unable to enable ext hub vddcx\n");
+		goto reg_enable_fail;
+	}
+
+	smsc_hub->xo_handle = msm_xo_get(MSM_XO_TCXO_D1, "hsic_hub");
+	if (IS_ERR(smsc_hub->xo_handle)) {
+		dev_err(&pdev->dev,
+			"not able to get the handle for TCXO D1 buffer\n");
+		goto disable_regulator;
+	}
+
+	ret = msm_xo_mode_vote(smsc_hub->xo_handle, MSM_XO_MODE_ON);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to vote for TCXO D1 buffer\n");
+		goto xo_vote_fail;
+	}
+
+	gpio_direction_output(pdata->hub_reset, 0);
+	/* Hub reset should be asserted for a minimum of 2 microseconds
+	 * before deasserting.
+	 */
+	udelay(5);
+	gpio_direction_output(pdata->hub_reset, 1);
+
+	ret = i2c_add_driver(&hsic_hub_driver);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to add I2C hsic_hub_driver\n");
+		goto i2c_add_fail;
+	}
+	usleep_range(10000, 12000);
+	i2c_adap = i2c_get_adapter(SMSC_GSBI_I2C_BUS_ID);
+
+	if (!i2c_adap) {
+		dev_err(&pdev->dev, "failed to get i2c adapter\n");
+		i2c_del_driver(&hsic_hub_driver);
+		goto i2c_add_fail;
+	}
+
+	memset(&i2c_info, 0, sizeof(struct i2c_board_info));
+	strlcpy(i2c_info.type, "i2c_hsic_hub", I2C_NAME_SIZE);
+
+	smsc_hub->client = i2c_new_probed_device(i2c_adap, &i2c_info,
+						   normal_i2c, NULL);
+	i2c_put_adapter(i2c_adap);
+	if (!smsc_hub->client)
+		dev_err(&pdev->dev,
+			"failed to connect to smsc_hub through I2C\n");
+
+i2c_add_fail:
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	return 0;
+
+xo_vote_fail:
+	msm_xo_put(smsc_hub->xo_handle);
+disable_regulator:
+	regulator_disable(smsc_hub->hsic_hub_reg);
+reg_enable_fail:
+	regulator_set_optimum_mode(smsc_hub->hsic_hub_reg, 0);
+reg_optimum_mode_fail:
+	regulator_set_voltage(smsc_hub->hsic_hub_reg, 0,
+				HSIC_HUB_VDD_VOL_MIN);
+reg_set_voltage_fail:
+	gpio_free(pdata->hub_reset);
+gpio_req_fail:
+	regulator_put(smsc_hub->hsic_hub_reg);
+free_mem:
+	kfree(smsc_hub);
+
+	return ret;
+}
+
+static int smsc_hub_remove(struct platform_device *pdev)
+{
+	const struct smsc_hub_platform_data *pdata;
+
+	pdata = pdev->dev.platform_data;
+	if (smsc_hub->client) {
+		i2c_unregister_device(smsc_hub->client);
+		smsc_hub->client = NULL;
+		i2c_del_driver(&hsic_hub_driver);
+	}
+	pm_runtime_disable(&pdev->dev);
+	msm_xo_put(smsc_hub->xo_handle);
+
+	regulator_disable(smsc_hub->hsic_hub_reg);
+	regulator_set_optimum_mode(smsc_hub->hsic_hub_reg, 0);
+	regulator_set_voltage(smsc_hub->hsic_hub_reg, 0,
+				HSIC_HUB_VDD_VOL_MIN);
+	gpio_free(pdata->hub_reset);
+	regulator_put(smsc_hub->hsic_hub_reg);
+	kfree(smsc_hub);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int msm_smsc_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "SMSC HUB runtime idle\n");
+
+	return 0;
+}
+
+static int smsc_hub_lpm_enter(struct device *dev)
+{
+	int ret;
+
+	ret = msm_xo_mode_vote(smsc_hub->xo_handle, MSM_XO_MODE_OFF);
+	if (ret) {
+		pr_err("%s: failed to devote for TCXO D1 buffer %d\n",
+			__func__, ret);
+	}
+	return ret;
+}
+
+static int smsc_hub_lpm_exit(struct device *dev)
+{
+	int ret;
+
+	ret = msm_xo_mode_vote(smsc_hub->xo_handle, MSM_XO_MODE_ON);
+	if (ret) {
+		pr_err("%s: failed to vote for TCXO D1 buffer %d\n",
+			__func__, ret);
+	}
+	return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static const struct dev_pm_ops smsc_hub_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(smsc_hub_lpm_enter, smsc_hub_lpm_exit)
+	SET_RUNTIME_PM_OPS(smsc_hub_lpm_enter, smsc_hub_lpm_exit,
+				msm_smsc_runtime_idle)
+};
+#endif
+
+static struct platform_driver smsc_hub_driver = {
+	.driver = {
+		.name = "msm_smsc_hub",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+		.pm = &smsc_hub_dev_pm_ops,
+#endif
+	},
+	.remove = smsc_hub_remove,
+};
+
+static int __init smsc_hub_init(void)
+{
+	return platform_driver_probe(&smsc_hub_driver, smsc_hub_probe);
+}
+
+static void __exit smsc_hub_exit(void)
+{
+	platform_driver_unregister(&smsc_hub_driver);
+}
+subsys_initcall(smsc_hub_init);
+module_exit(smsc_hub_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SMSC HSIC HUB driver");
diff --git a/drivers/misc/tsif.c b/drivers/misc/tsif.c
new file mode 100644
index 0000000..2b09d7c
--- /dev/null
+++ b/drivers/misc/tsif.c
@@ -0,0 +1,1580 @@
+/*
+ * TSIF Driver
+ *
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>       /* Needed by all modules */
+#include <linux/kernel.h>       /* Needed for KERN_INFO */
+#include <linux/init.h>         /* Needed for the macros */
+#include <linux/err.h>          /* IS_ERR etc. */
+#include <linux/platform_device.h>
+
+#include <linux/ioport.h>       /* XXX_mem_region */
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>  /* dma_XXX */
+#include <linux/delay.h>        /* msleep */
+
+#include <linux/io.h>             /* ioXXX */
+#include <linux/uaccess.h>        /* copy_from_user */
+#include <linux/clk.h>
+#include <linux/wakelock.h>
+#include <linux/tsif_api.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>          /* kfree, kzalloc */
+#include <linux/gpio.h>
+
+#include <mach/dma.h>
+#include <mach/msm_tsif.h>
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF               (0x0)
+#define TSIF_TIME_LIMIT_OFF            (0x4)
+#define TSIF_CLK_REF_OFF               (0x8)
+#define TSIF_LPBK_FLAGS_OFF            (0xc)
+#define TSIF_LPBK_DATA_OFF            (0x10)
+#define TSIF_TEST_CTL_OFF             (0x14)
+#define TSIF_TEST_MODE_OFF            (0x18)
+#define TSIF_TEST_RESET_OFF           (0x1c)
+#define TSIF_TEST_EXPORT_OFF          (0x20)
+#define TSIF_TEST_CURRENT_OFF         (0x24)
+
+#define TSIF_DATA_PORT_OFF            (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ       (1 << 28)
+#define TSIF_STS_CTL_PACK_AVAIL   (1 << 27)
+#define TSIF_STS_CTL_1ST_PACKET   (1 << 26)
+#define TSIF_STS_CTL_OVERFLOW     (1 << 25)
+#define TSIF_STS_CTL_LOST_SYNC    (1 << 24)
+#define TSIF_STS_CTL_TIMEOUT      (1 << 23)
+#define TSIF_STS_CTL_INV_SYNC     (1 << 21)
+#define TSIF_STS_CTL_INV_NULL     (1 << 20)
+#define TSIF_STS_CTL_INV_ERROR    (1 << 19)
+#define TSIF_STS_CTL_INV_ENABLE   (1 << 18)
+#define TSIF_STS_CTL_INV_DATA     (1 << 17)
+#define TSIF_STS_CTL_INV_CLOCK    (1 << 16)
+#define TSIF_STS_CTL_SPARE        (1 << 15)
+#define TSIF_STS_CTL_EN_NULL      (1 << 11)
+#define TSIF_STS_CTL_EN_ERROR     (1 << 10)
+#define TSIF_STS_CTL_LAST_BIT     (1 <<  9)
+#define TSIF_STS_CTL_EN_TIME_LIM  (1 <<  8)
+#define TSIF_STS_CTL_EN_TCR       (1 <<  7)
+#define TSIF_STS_CTL_TEST_MODE    (3 <<  5)
+#define TSIF_STS_CTL_EN_DM        (1 <<  4)
+#define TSIF_STS_CTL_STOP         (1 <<  3)
+#define TSIF_STS_CTL_START        (1 <<  0)
+
+/*
+ * Data buffering parameters
+ *
+ * Data stored in cyclic buffer;
+ *
+ * Data organized in chunks of packets.
+ * One chunk processed at a time by the data mover
+ *
+ */
+#define TSIF_PKTS_IN_CHUNK_DEFAULT  (16)  /**< packets in one DM chunk */
+#define TSIF_CHUNKS_IN_BUF_DEFAULT   (8)
+#define TSIF_PKTS_IN_CHUNK        (tsif_device->pkts_per_chunk)
+#define TSIF_CHUNKS_IN_BUF        (tsif_device->chunks_per_buf)
+#define TSIF_PKTS_IN_BUF          (TSIF_PKTS_IN_CHUNK * TSIF_CHUNKS_IN_BUF)
+#define TSIF_BUF_SIZE             (TSIF_PKTS_IN_BUF * TSIF_PKT_SIZE)
+#define TSIF_MAX_ID               1
+
+#define ROW_RESET                 (MSM_CLK_CTL_BASE + 0x214)
+#define GLBL_CLK_ENA              (MSM_CLK_CTL_BASE + 0x000)
+#define CLK_HALT_STATEB           (MSM_CLK_CTL_BASE + 0x104)
+#define TSIF_NS_REG               (MSM_CLK_CTL_BASE + 0x0b4)
+#define TV_NS_REG                 (MSM_CLK_CTL_BASE + 0x0bc)
+
+/* used to create debugfs entries */
+static const struct {
+	const char *name;
+	mode_t mode;
+	int offset;
+} debugfs_tsif_regs[] = {
+	{"sts_ctl",      S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
+	{"time_limit",   S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
+	{"clk_ref",      S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
+	{"lpbk_flags",   S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
+	{"lpbk_data",    S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
+	{"test_ctl",     S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
+	{"test_mode",    S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
+	{"test_reset",             S_IWUSR, TSIF_TEST_RESET_OFF},
+	{"test_export",  S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
+	{"test_current", S_IRUGO,           TSIF_TEST_CURRENT_OFF},
+	{"data_port",    S_IRUSR,           TSIF_DATA_PORT_OFF},
+};
+
+/* structures for Data Mover */
+struct tsif_dmov_cmd {
+	dmov_box box;
+	dma_addr_t box_ptr;
+};
+
+struct msm_tsif_device;
+
+struct tsif_xfer {
+	struct msm_dmov_cmd hdr;
+	struct msm_tsif_device *tsif_device;
+	int busy;
+	int wi;   /**< set device's write index after xfer */
+};
+
+struct msm_tsif_device {
+	struct list_head devlist;
+	struct platform_device *pdev;
+	struct resource *memres;
+	void __iomem *base;
+	unsigned int irq;
+	int mode;
+	u32 time_limit;
+	enum tsif_state state;
+	struct wake_lock wake_lock;
+	/* clocks */
+	struct clk *tsif_clk;
+	struct clk *tsif_pclk;
+	struct clk *tsif_ref_clk;
+	/* debugfs */
+	struct dentry *dent_tsif;
+	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+	struct dentry *debugfs_gpio;
+	struct dentry *debugfs_action;
+	struct dentry *debugfs_dma;
+	struct dentry *debugfs_databuf;
+	struct debugfs_blob_wrapper blob_wrapper_databuf;
+	/* DMA related */
+	int dma;
+	int crci;
+	void *data_buffer;
+	dma_addr_t data_buffer_dma;
+	u32 pkts_per_chunk;
+	u32 chunks_per_buf;
+	int ri;
+	int wi;
+	int dmwi;  /**< DataMover write index */
+	struct tsif_dmov_cmd *dmov_cmd[2];
+	dma_addr_t dmov_cmd_dma[2];
+	struct tsif_xfer xfer[2];
+	struct tasklet_struct dma_refill;
+	/* statistics */
+	u32 stat_rx;
+	u32 stat_overflow;
+	u32 stat_lost_sync;
+	u32 stat_timeout;
+	u32 stat_dmov_err;
+	u32 stat_soft_drop;
+	int stat_ifi; /* inter frame interval */
+	u32 stat0, stat1;
+	/* client */
+	void *client_data;
+	void (*client_notify)(void *client_data);
+};
+
+/* ===clocks begin=== */
+
+static void tsif_put_clocks(struct msm_tsif_device *tsif_device)
+{
+	if (tsif_device->tsif_clk) {
+		clk_put(tsif_device->tsif_clk);
+		tsif_device->tsif_clk = NULL;
+	}
+	if (tsif_device->tsif_pclk) {
+		clk_put(tsif_device->tsif_pclk);
+		tsif_device->tsif_pclk = NULL;
+	}
+
+	if (tsif_device->tsif_ref_clk) {
+		clk_put(tsif_device->tsif_ref_clk);
+		tsif_device->tsif_ref_clk = NULL;
+	}
+}
+
+static int tsif_get_clocks(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	int rc = 0;
+
+	if (pdata->tsif_clk) {
+		tsif_device->tsif_clk = clk_get(&tsif_device->pdev->dev,
+						pdata->tsif_clk);
+		if (IS_ERR(tsif_device->tsif_clk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_clk);
+			rc = PTR_ERR(tsif_device->tsif_clk);
+			tsif_device->tsif_clk = NULL;
+			goto ret;
+		}
+	}
+	if (pdata->tsif_pclk) {
+		tsif_device->tsif_pclk = clk_get(&tsif_device->pdev->dev,
+						 pdata->tsif_pclk);
+		if (IS_ERR(tsif_device->tsif_pclk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_pclk);
+			rc = PTR_ERR(tsif_device->tsif_pclk);
+			tsif_device->tsif_pclk = NULL;
+			goto ret;
+		}
+	}
+	if (pdata->tsif_ref_clk) {
+		tsif_device->tsif_ref_clk = clk_get(&tsif_device->pdev->dev,
+						    pdata->tsif_ref_clk);
+		if (IS_ERR(tsif_device->tsif_ref_clk)) {
+			dev_err(&tsif_device->pdev->dev, "failed to get %s\n",
+				pdata->tsif_ref_clk);
+			rc = PTR_ERR(tsif_device->tsif_ref_clk);
+			tsif_device->tsif_ref_clk = NULL;
+			goto ret;
+		}
+	}
+	return 0;
+ret:
+	tsif_put_clocks(tsif_device);
+	return rc;
+}
+
+static void tsif_clock(struct msm_tsif_device *tsif_device, int on)
+{
+	if (on) {
+		if (tsif_device->tsif_clk)
+			clk_enable(tsif_device->tsif_clk);
+		if (tsif_device->tsif_pclk)
+			clk_enable(tsif_device->tsif_pclk);
+		clk_enable(tsif_device->tsif_ref_clk);
+	} else {
+		if (tsif_device->tsif_clk)
+			clk_disable(tsif_device->tsif_clk);
+		if (tsif_device->tsif_pclk)
+			clk_disable(tsif_device->tsif_pclk);
+		clk_disable(tsif_device->tsif_ref_clk);
+	}
+}
+/* ===clocks end=== */
+/* ===gpio begin=== */
+
+static void tsif_gpios_free(const struct msm_gpio *table, int size)
+{
+	int i;
+	const struct msm_gpio *g;
+	for (i = size-1; i >= 0; i--) {
+		g = table + i;
+		gpio_free(GPIO_PIN(g->gpio_cfg));
+	}
+}
+
+static int tsif_gpios_request(const struct msm_gpio *table, int size)
+{
+	int rc;
+	int i;
+	const struct msm_gpio *g;
+	for (i = 0; i < size; i++) {
+		g = table + i;
+		rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
+		if (rc) {
+			pr_err("gpio_request(%d) <%s> failed: %d\n",
+			       GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
+			goto err;
+		}
+	}
+	return 0;
+err:
+	tsif_gpios_free(table, i);
+	return rc;
+}
+
+static int tsif_gpios_disable(const struct msm_gpio *table, int size)
+{
+	int rc = 0;
+	int i;
+	const struct msm_gpio *g;
+	for (i = size-1; i >= 0; i--) {
+		int tmp;
+		g = table + i;
+		tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
+		if (tmp) {
+			pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", tmp);
+			pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			if (!rc)
+				rc = tmp;
+		}
+	}
+
+	return rc;
+}
+
+static int tsif_gpios_enable(const struct msm_gpio *table, int size)
+{
+	int rc;
+	int i;
+	const struct msm_gpio *g;
+	for (i = 0; i < size; i++) {
+		g = table + i;
+		rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
+		if (rc) {
+			pr_err("gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", rc);
+			pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			goto err;
+		}
+	}
+	return 0;
+err:
+	tsif_gpios_disable(table, i);
+	return rc;
+}
+
+static int tsif_gpios_request_enable(const struct msm_gpio *table, int size)
+{
+	int rc = tsif_gpios_request(table, size);
+	if (rc)
+		return rc;
+	rc = tsif_gpios_enable(table, size);
+	if (rc)
+		tsif_gpios_free(table, size);
+	return rc;
+}
+
+static void tsif_gpios_disable_free(const struct msm_gpio *table, int size)
+{
+	tsif_gpios_disable(table, size);
+	tsif_gpios_free(table, size);
+}
+
+static int tsif_start_gpios(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	return tsif_gpios_request_enable(pdata->gpios, pdata->num_gpios);
+}
+
+static void tsif_stop_gpios(struct msm_tsif_device *tsif_device)
+{
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	tsif_gpios_disable_free(pdata->gpios, pdata->num_gpios);
+}
+
+/* ===gpio end=== */
+
+static int tsif_start_hw(struct msm_tsif_device *tsif_device)
+{
+	u32 ctl = TSIF_STS_CTL_EN_IRQ |
+		  TSIF_STS_CTL_EN_TIME_LIM |
+		  TSIF_STS_CTL_EN_TCR |
+		  TSIF_STS_CTL_EN_DM;
+	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
+	switch (tsif_device->mode) {
+	case 1: /* mode 1 */
+		ctl |= (0 << 5);
+		break;
+	case 2: /* mode 2 */
+		ctl |= (1 << 5);
+		break;
+	case 3: /* manual - control from debugfs */
+		return 0;
+	default:
+		return -EINVAL;
+	}
+	iowrite32(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+	iowrite32(tsif_device->time_limit,
+		  tsif_device->base + TSIF_TIME_LIMIT_OFF);
+	wmb();
+	iowrite32(ctl | TSIF_STS_CTL_START,
+		  tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+	ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+	return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
+}
+
+static void tsif_stop_hw(struct msm_tsif_device *tsif_device)
+{
+	iowrite32(TSIF_STS_CTL_STOP, tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+}
+
+/* ===DMA begin=== */
+/**
+ * TSIF DMA theory of operation
+ *
+ * A circular memory buffer \a tsif_mem_buffer is allocated;
+ * four pointers point into it and are moved forward:
+ * - \a ri index of first ready to read packet.
+ *      Updated by client's call to tsif_reclaim_packets()
+ * - \a wi points to the next packet to be written by DM.
+ *      Data below it is valid and will not be overridden by DMA.
+ *      Moved in the DM callback.
+ * - \a dmwi points to the next packet not scheduled yet for DM
+ *      moved when packet scheduled for DM
+ *
+ * In addition, each DM xfer keeps an internal \a wi - a copy of
+ * \a tsif_device->dmwi at the time immediately after scheduling.
+ *
+ * Initially, 2 packets get scheduled for the DM.
+ *
+ * Upon packet receive, the DM writes the packet to the pre-programmed
+ * location and invokes its callback.
+ *
+ * The DM callback sets the wi pointer to \a xfer->wi;
+ * it then schedules the next packet for DM and moves the \a dmwi pointer.
+ *
+ * Buffer overflow handling
+ *
+ * If \a dmwi == \a ri-1, buffer is full and \a dmwi can't be advanced.
+ * DMA re-scheduled to the same index.
+ * Callback check and not move \a wi to become equal to \a ri
+ *
+ * On \a read request, data between \a ri and \a wi pointers may be read;
+ * \ri pointer moved accordingly.
+ *
+ * It is always granted, on modulo sizeof(tsif_mem_buffer), that
+ * \a wi is between [\a ri, \a dmwi]
+ *
+ * Amount of data available is (wi-ri)*TSIF_PKT_SIZE
+ *
+ * Number of scheduled packets for DM: (dmwi-wi)
+ */
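+
+/*
+ * Worked example (illustrative values only): with TSIF_PKTS_IN_BUF == 64,
+ * ri == 10, wi == 14 and dmwi == 18, packets [10..13] are ready for the
+ * client, i.e. (wi - ri) * TSIF_PKT_SIZE = 4 * TSIF_PKT_SIZE bytes are
+ * available, and (dmwi - wi) = 4 packets are currently scheduled for DM.
+ * All index arithmetic wraps modulo TSIF_PKTS_IN_BUF.
+ */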
+
+/**
+ * tsif_dma_schedule - schedule DMA transfers
+ *
+ * @tsif_device: device
+ *
+ * Executed from process context on init, or from tasklet when
+ * re-scheduling upon DMA completion.
+ * This prevents concurrent execution on several CPUs.
+ */
+static void tsif_dma_schedule(struct msm_tsif_device *tsif_device)
+{
+	int i, dmwi0, dmwi1, found = 0;
+	/* find free entry */
+	for (i = 0; i < 2; i++) {
+		struct tsif_xfer *xfer = &tsif_device->xfer[i];
+		if (xfer->busy)
+			continue;
+		found++;
+		xfer->busy = 1;
+		dmwi0 = tsif_device->dmwi;
+		tsif_device->dmov_cmd[i]->box.dst_row_addr =
+			tsif_device->data_buffer_dma + TSIF_PKT_SIZE * dmwi0;
+		/* proposed value for dmwi */
+		dmwi1 = (dmwi0 + TSIF_PKTS_IN_CHUNK) % TSIF_PKTS_IN_BUF;
+		/*
+		 * If dmwi is going to overlap with ri, an overflow
+		 * occurs because the data was not read.
+		 * Still receive this packet, so as not to interrupt the
+		 * TSIF hardware, but do not advance dmwi.
+		 *
+		 * Upon receive, the packet will be dropped.
+		 */
+		if (dmwi1 != tsif_device->ri) {
+			tsif_device->dmwi = dmwi1;
+		} else {
+			dev_info(&tsif_device->pdev->dev,
+				 "Overflow detected\n");
+		}
+		xfer->wi = tsif_device->dmwi;
+#ifdef CONFIG_TSIF_DEBUG
+		dev_info(&tsif_device->pdev->dev,
+			"schedule xfer[%d] -> [%2d]{%2d}\n",
+			i, dmwi0, xfer->wi);
+#endif
+		/* complete all the writes to box */
+		dma_coherent_pre_ops();
+		msm_dmov_enqueue_cmd(tsif_device->dma, &xfer->hdr);
+	}
+	if (!found)
+		dev_info(&tsif_device->pdev->dev,
+			 "All xfer entries are busy\n");
+}
+
+/**
+ * tsif_dmov_complete_func - DataMover completion callback
+ *
+ * @cmd:      original DM command
+ * @result:   DM result
+ * @err:      optional error buffer
+ *
+ * Executed in IRQ context (Data Mover's IRQ)
+ * DataMover's spinlock @msm_dmov_lock held.
+ */
+static void tsif_dmov_complete_func(struct msm_dmov_cmd *cmd,
+				    unsigned int result,
+				    struct msm_dmov_errdata *err)
+{
+	int i;
+	u32 data_offset;
+	struct tsif_xfer *xfer;
+	struct msm_tsif_device *tsif_device;
+	int reschedule = 0;
+	if (!(result & DMOV_RSLT_VALID)) { /* can we trust @cmd? */
+		pr_err("Invalid DMOV result: rc=0x%08x, cmd = %p", result, cmd);
+		return;
+	}
+	/* restore original context */
+	xfer = container_of(cmd, struct tsif_xfer, hdr);
+	tsif_device = xfer->tsif_device;
+	i = xfer - tsif_device->xfer;
+	data_offset = tsif_device->dmov_cmd[i]->box.dst_row_addr -
+		      tsif_device->data_buffer_dma;
+
+	/* order reads from the xferred buffer */
+	dma_coherent_post_ops();
+	if (result & DMOV_RSLT_DONE) {
+		int w = data_offset / TSIF_PKT_SIZE;
+		tsif_device->stat_rx++;
+		/*
+		 * software overflow when this xfer was scheduled?
+		 *
+		 * @w is where this xfer was actually written to;
+		 * @xfer->wi is where device's @wi will be set;
+		 *
+		 * if these 2 are equal, we are short in space and
+		 * going to overwrite this xfer - this is "soft drop"
+		 */
+		if (w == xfer->wi)
+			tsif_device->stat_soft_drop++;
+		reschedule = (tsif_device->state == tsif_state_running);
+#ifdef CONFIG_TSIF_DEBUG
+		/* IFI calculation */
+		/*
+		 * update stat_ifi (inter frame interval)
+		 *
+		 * Calculate the time difference between the last and
+		 * first packets in the chunk
+		 *
+		 * To be removed after tuning
+		 */
+		if (TSIF_PKTS_IN_CHUNK > 1) {
+			void *ptr = tsif_device->data_buffer + data_offset;
+			u32 *p0 = ptr;
+			u32 *p1 = ptr + (TSIF_PKTS_IN_CHUNK - 1) *
+				TSIF_PKT_SIZE;
+			u32 tts0 = TSIF_STATUS_TTS(tsif_device->stat0 =
+						   tsif_pkt_status(p0));
+			u32 tts1 = TSIF_STATUS_TTS(tsif_device->stat1 =
+						   tsif_pkt_status(p1));
+			tsif_device->stat_ifi = (tts1 - tts0) /
+				(TSIF_PKTS_IN_CHUNK - 1);
+		}
+#endif
+	} else {
+		/*
+		 * Error or flush
+		 *
+		 * To recover, re-open the TSIF device.
+		 */
+		/* mark status "not valid" in data buffer */
+		int n;
+		void *ptr = tsif_device->data_buffer + data_offset;
+		for (n = 0; n < TSIF_PKTS_IN_CHUNK; n++) {
+			u32 *p = ptr + (n * TSIF_PKT_SIZE);
+			/* last dword is status + TTS */
+			p[TSIF_PKT_SIZE / sizeof(*p) - 1] = 0;
+		}
+		if (result & DMOV_RSLT_ERROR) {
+			dev_err(&tsif_device->pdev->dev,
+				"DMA error (0x%08x)\n", result);
+			tsif_device->stat_dmov_err++;
+			/* force device close */
+			if (tsif_device->state == tsif_state_running) {
+				tsif_stop_hw(tsif_device);
+				/*
+				 * Clocks _may_ be stopped right from IRQ
+				 * context. This is far from optimal w.r.t
+				 * latency.
+				 *
+				 * However, this branch is taken only in case
+				 * of a severe hardware problem (it is unclear
+				 * what should happen for DMOV_RSLT_ERROR), so
+				 * code simplicity is preferred over
+				 * performance here.
+				 */
+				tsif_clock(tsif_device, 0);
+				tsif_device->state = tsif_state_flushing;
+			}
+		}
+		if (result & DMOV_RSLT_FLUSH) {
+			/*
+			 * Flushing normally happens in process of
+			 * @tsif_stop(), when we are waiting for outstanding
+			 * DMA commands to be flushed.
+			 */
+			dev_info(&tsif_device->pdev->dev,
+				 "DMA channel flushed (0x%08x)\n", result);
+			if (tsif_device->state == tsif_state_flushing) {
+				if ((!tsif_device->xfer[0].busy) &&
+				    (!tsif_device->xfer[1].busy)) {
+					tsif_device->state = tsif_state_stopped;
+				}
+			}
+		}
+		if (err)
+			dev_err(&tsif_device->pdev->dev,
+				"Flush data: %08x %08x %08x %08x %08x %08x\n",
+				err->flush[0], err->flush[1], err->flush[2],
+				err->flush[3], err->flush[4], err->flush[5]);
+	}
+	tsif_device->wi = xfer->wi;
+	xfer->busy = 0;
+	if (tsif_device->client_notify)
+		tsif_device->client_notify(tsif_device->client_data);
+	/*
+	 * Can't schedule the next DMA from here - the DataMover
+	 * driver still holds its semaphore, and a deadlock would occur.
+	 */
+	if (reschedule)
+		tasklet_schedule(&tsif_device->dma_refill);
+}
+
+/**
+ * tsif_dma_refill - tasklet function for tsif_device->dma_refill
+ *
+ * @data:   tsif_device
+ *
+ * Reschedule DMA requests
+ *
+ * Executed in tasklet context
+ */
+static void tsif_dma_refill(unsigned long data)
+{
+	struct msm_tsif_device *tsif_device = (struct msm_tsif_device *) data;
+	if (tsif_device->state == tsif_state_running)
+		tsif_dma_schedule(tsif_device);
+}
+
+/**
+ * tsif_dma_flush - flush DMA channel
+ *
+ * @tsif_device: device whose DMA channel is to be flushed
+ *
+ * Busy-waits until the DMA channel has been flushed.
+ */
+static void tsif_dma_flush(struct msm_tsif_device *tsif_device)
+{
+	if (tsif_device->xfer[0].busy || tsif_device->xfer[1].busy) {
+		tsif_device->state = tsif_state_flushing;
+		while (tsif_device->xfer[0].busy ||
+		       tsif_device->xfer[1].busy) {
+			msm_dmov_flush(tsif_device->dma, 1);
+			msleep(10);
+		}
+	}
+	tsif_device->state = tsif_state_stopped;
+	if (tsif_device->client_notify)
+		tsif_device->client_notify(tsif_device->client_data);
+}
+
+static void tsif_dma_exit(struct msm_tsif_device *tsif_device)
+{
+	int i;
+	tsif_device->state = tsif_state_flushing;
+	tasklet_kill(&tsif_device->dma_refill);
+	tsif_dma_flush(tsif_device);
+	for (i = 0; i < 2; i++) {
+		if (tsif_device->dmov_cmd[i]) {
+			dma_free_coherent(NULL, sizeof(struct tsif_dmov_cmd),
+					  tsif_device->dmov_cmd[i],
+					  tsif_device->dmov_cmd_dma[i]);
+			tsif_device->dmov_cmd[i] = NULL;
+		}
+	}
+	if (tsif_device->data_buffer) {
+		tsif_device->blob_wrapper_databuf.data = NULL;
+		tsif_device->blob_wrapper_databuf.size = 0;
+		dma_free_coherent(NULL, TSIF_BUF_SIZE,
+				  tsif_device->data_buffer,
+				  tsif_device->data_buffer_dma);
+		tsif_device->data_buffer = NULL;
+	}
+}
+
+static int tsif_dma_init(struct msm_tsif_device *tsif_device)
+{
+	int i;
+	/* TODO: allocate all DMA memory in one buffer */
+	/* Note: don't pass the device here; that would require
+	   coherent_dma_mask to be set in the device definition */
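+	/* (illustration only: passing &tsif_device->pdev->dev instead would
+	 *  first need something like
+	 *  dma_set_coherent_mask(&tsif_device->pdev->dev, DMA_BIT_MASK(32))) */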
+	tsif_device->data_buffer = dma_alloc_coherent(NULL, TSIF_BUF_SIZE,
+				&tsif_device->data_buffer_dma, GFP_KERNEL);
+	if (!tsif_device->data_buffer)
+		goto err;
+	dev_info(&tsif_device->pdev->dev, "data_buffer: %p phys 0x%08x\n",
+		 tsif_device->data_buffer, tsif_device->data_buffer_dma);
+	tsif_device->blob_wrapper_databuf.data = tsif_device->data_buffer;
+	tsif_device->blob_wrapper_databuf.size = TSIF_BUF_SIZE;
+	tsif_device->ri = 0;
+	tsif_device->wi = 0;
+	tsif_device->dmwi = 0;
+	for (i = 0; i < 2; i++) {
+		dmov_box *box;
+		struct msm_dmov_cmd *hdr;
+		tsif_device->dmov_cmd[i] = dma_alloc_coherent(NULL,
+			sizeof(struct tsif_dmov_cmd),
+			&tsif_device->dmov_cmd_dma[i], GFP_KERNEL);
+		if (!tsif_device->dmov_cmd[i])
+			goto err;
+		dev_info(&tsif_device->pdev->dev, "dma[%i]: %p phys 0x%08x\n",
+			 i, tsif_device->dmov_cmd[i],
+			 tsif_device->dmov_cmd_dma[i]);
+		/* dst in 16 LSB, src in 16 MSB */
+		box = &(tsif_device->dmov_cmd[i]->box);
+		box->cmd = CMD_MODE_BOX | CMD_LC |
+			   CMD_SRC_CRCI(tsif_device->crci);
+		box->src_row_addr =
+			tsif_device->memres->start + TSIF_DATA_PORT_OFF;
+		box->src_dst_len = (TSIF_PKT_SIZE << 16) | TSIF_PKT_SIZE;
+		box->num_rows = (TSIF_PKTS_IN_CHUNK << 16) | TSIF_PKTS_IN_CHUNK;
+		box->row_offset = (0 << 16) | TSIF_PKT_SIZE;
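+		/*
+		 * i.e. (with the dst-in-LSB/src-in-MSB packing noted above)
+		 * each box transfer reads TSIF_PKTS_IN_CHUNK rows of
+		 * TSIF_PKT_SIZE bytes from the fixed TSIF data port
+		 * (src row offset 0) and writes them to consecutive packet
+		 * slots in the data buffer (dst row offset TSIF_PKT_SIZE).
+		 */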
+
+		tsif_device->dmov_cmd[i]->box_ptr = CMD_PTR_LP |
+			DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
+				      offsetof(struct tsif_dmov_cmd, box));
+		tsif_device->xfer[i].tsif_device = tsif_device;
+		hdr = &tsif_device->xfer[i].hdr;
+		hdr->cmdptr = DMOV_CMD_ADDR(tsif_device->dmov_cmd_dma[i] +
+			      offsetof(struct tsif_dmov_cmd, box_ptr));
+		hdr->complete_func = tsif_dmov_complete_func;
+	}
+	msm_dmov_flush(tsif_device->dma, 1);
+	return 0;
+err:
+	dev_err(&tsif_device->pdev->dev, "Failed to allocate DMA buffers\n");
+	tsif_dma_exit(tsif_device);
+	return -ENOMEM;
+}
+
+/* ===DMA end=== */
+
+/* ===IRQ begin=== */
+
+static irqreturn_t tsif_irq(int irq, void *dev_id)
+{
+	struct msm_tsif_device *tsif_device = dev_id;
+	u32 sts_ctl = ioread32(tsif_device->base + TSIF_STS_CTL_OFF);
+	if (!(sts_ctl & (TSIF_STS_CTL_PACK_AVAIL |
+			 TSIF_STS_CTL_OVERFLOW |
+			 TSIF_STS_CTL_LOST_SYNC |
+			 TSIF_STS_CTL_TIMEOUT))) {
+		dev_warn(&tsif_device->pdev->dev, "Spurious interrupt\n");
+		return IRQ_NONE;
+	}
+	if (sts_ctl & TSIF_STS_CTL_PACK_AVAIL) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: PACK_AVAIL\n");
+		tsif_device->stat_rx++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_OVERFLOW) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: OVERFLOW\n");
+		tsif_device->stat_overflow++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_LOST_SYNC) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: LOST SYNC\n");
+		tsif_device->stat_lost_sync++;
+	}
+	if (sts_ctl & TSIF_STS_CTL_TIMEOUT) {
+		dev_info(&tsif_device->pdev->dev, "TSIF IRQ: TIMEOUT\n");
+		tsif_device->stat_timeout++;
+	}
+	iowrite32(sts_ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+	wmb();
+	return IRQ_HANDLED;
+}
+
+/* ===IRQ end=== */
+
+/* ===Device attributes begin=== */
+
+static ssize_t show_stats(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	char *state_string;
+	switch (tsif_device->state) {
+	case tsif_state_stopped:
+		state_string = "stopped";
+		break;
+	case tsif_state_running:
+		state_string = "running";
+		break;
+	case tsif_state_flushing:
+		state_string = "flushing";
+		break;
+	default:
+		state_string = "???";
+	}
+	return snprintf(buf, PAGE_SIZE,
+			"Device       %s\n"
+			"Mode       = %d\n"
+			"Time limit = %d\n"
+			"State        %s\n"
+			"Client     = %p\n"
+			"Pkt/Buf    = %d\n"
+			"Pkt/chunk  = %d\n"
+			"--statistics--\n"
+			"Rx chunks  = %d\n"
+			"Overflow   = %d\n"
+			"Lost sync  = %d\n"
+			"Timeout    = %d\n"
+			"DMA error  = %d\n"
+			"Soft drop  = %d\n"
+			"IFI        = %d\n"
+			"(0x%08x - 0x%08x) / %d\n"
+			"--debug--\n"
+			"GLBL_CLK_ENA     = 0x%08x\n"
+			"ROW_RESET        = 0x%08x\n"
+			"CLK_HALT_STATEB  = 0x%08x\n"
+			"TV_NS_REG        = 0x%08x\n"
+			"TSIF_NS_REG      = 0x%08x\n",
+			dev_name(dev),
+			tsif_device->mode,
+			tsif_device->time_limit,
+			state_string,
+			tsif_device->client_data,
+			TSIF_PKTS_IN_BUF,
+			TSIF_PKTS_IN_CHUNK,
+			tsif_device->stat_rx,
+			tsif_device->stat_overflow,
+			tsif_device->stat_lost_sync,
+			tsif_device->stat_timeout,
+			tsif_device->stat_dmov_err,
+			tsif_device->stat_soft_drop,
+			tsif_device->stat_ifi,
+			tsif_device->stat1,
+			tsif_device->stat0,
+			TSIF_PKTS_IN_CHUNK - 1,
+			ioread32(GLBL_CLK_ENA),
+			ioread32(ROW_RESET),
+			ioread32(CLK_HALT_STATEB),
+			ioread32(TV_NS_REG),
+			ioread32(TSIF_NS_REG)
+			);
+}
+/**
+ * set_stats - reset statistics on write
+ *
+ * @dev:    device
+ * @attr:   device attribute
+ * @buf:    written data (ignored)
+ * @count:  number of bytes written; returned unchanged
+ */
+static ssize_t set_stats(struct device *dev, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	tsif_device->stat_rx = 0;
+	tsif_device->stat_overflow = 0;
+	tsif_device->stat_lost_sync = 0;
+	tsif_device->stat_timeout = 0;
+	tsif_device->stat_dmov_err = 0;
+	tsif_device->stat_soft_drop = 0;
+	tsif_device->stat_ifi = 0;
+	return count;
+}
+static DEVICE_ATTR(stats, S_IRUGO | S_IWUSR, show_stats, set_stats);
+
+static ssize_t show_mode(struct device *dev, struct device_attribute *attr,
+			 char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->mode);
+}
+
+static ssize_t set_mode(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	int value;
+	int rc;
+	if (1 != sscanf(buf, "%d", &value)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	rc = tsif_set_mode(tsif_device, value);
+	if (!rc)
+		rc = count;
+	return rc;
+}
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, show_mode, set_mode);
+
+static ssize_t show_time_limit(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%d\n", tsif_device->time_limit);
+}
+
+static ssize_t set_time_limit(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	int value;
+	int rc;
+	if (1 != sscanf(buf, "%d", &value)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	rc = tsif_set_time_limit(tsif_device, value);
+	if (!rc)
+		rc = count;
+	return rc;
+}
+static DEVICE_ATTR(time_limit, S_IRUGO | S_IWUSR,
+		   show_time_limit, set_time_limit);
+
+static ssize_t show_buf_config(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	return snprintf(buf, PAGE_SIZE, "%d * %d\n",
+			tsif_device->pkts_per_chunk,
+			tsif_device->chunks_per_buf);
+}
+
+static ssize_t set_buf_config(struct device *dev,
+			      struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct msm_tsif_device *tsif_device = dev_get_drvdata(dev);
+	u32 p, c;
+	int rc;
+	if (2 != sscanf(buf, "%d * %d", &p, &c)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Failed to parse integer: <%s>\n", buf);
+		return -EINVAL;
+	}
+	rc = tsif_set_buf_config(tsif_device, p, c);
+	if (!rc)
+		rc = count;
+	return rc;
+}
+static DEVICE_ATTR(buf_config, S_IRUGO | S_IWUSR,
+		   show_buf_config, set_buf_config);
+
+static struct attribute *dev_attrs[] = {
+	&dev_attr_stats.attr,
+	&dev_attr_mode.attr,
+	&dev_attr_time_limit.attr,
+	&dev_attr_buf_config.attr,
+	NULL,
+};
+static struct attribute_group dev_attr_grp = {
+	.attrs = dev_attrs,
+};
+/* ===Device attributes end=== */
+
+/* ===debugfs begin=== */
+
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	iowrite32(val, data);
+	wmb();
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	*val = ioread32(data);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx\n");
+
+struct dentry *debugfs_create_iomem_x32(const char *name, mode_t mode,
+					struct dentry *parent, u32 *value)
+{
+	return debugfs_create_file(name, mode, parent, value, &fops_iomem_x32);
+}
+
+static int action_open(struct msm_tsif_device *tsif_device)
+{
+	int rc = -EINVAL;
+	int result;
+
+	struct msm_tsif_platform_data *pdata =
+		tsif_device->pdev->dev.platform_data;
+	dev_info(&tsif_device->pdev->dev, "%s\n", __func__);
+	if (tsif_device->state != tsif_state_stopped)
+		return -EAGAIN;
+	rc = tsif_dma_init(tsif_device);
+	if (rc) {
+		dev_err(&tsif_device->pdev->dev, "failed to init DMA\n");
+		return rc;
+	}
+	tsif_device->state = tsif_state_running;
+	/*
+	 * DMA should be scheduled prior to TSIF hardware initialization,
+	 * otherwise "bus error" will be reported by Data Mover
+	 */
+	enable_irq(tsif_device->irq);
+	tsif_clock(tsif_device, 1);
+	tsif_dma_schedule(tsif_device);
+	/*
+	 * init the device if required
+	 */
+	if (pdata->init)
+		pdata->init(pdata);
+	rc = tsif_start_hw(tsif_device);
+	if (rc) {
+		dev_err(&tsif_device->pdev->dev, "Unable to start HW\n");
+		tsif_dma_exit(tsif_device);
+		tsif_clock(tsif_device, 0);
+		return rc;
+	}
+
+	result = pm_runtime_get(&tsif_device->pdev->dev);
+	if (result < 0) {
+		dev_err(&tsif_device->pdev->dev,
+			"Runtime PM: Unable to wake up the device, rc = %d\n",
+			result);
+		return result;
+	}
+
+	wake_lock(&tsif_device->wake_lock);
+	return rc;
+}
+
+static int action_close(struct msm_tsif_device *tsif_device)
+{
+	dev_info(&tsif_device->pdev->dev, "%s, state %d\n", __func__,
+		 (int)tsif_device->state);
+	/*
+	 * DMA should be flushed/stopped prior to TSIF hardware stop,
+	 * otherwise "bus error" will be reported by Data Mover
+	 */
+	tsif_stop_hw(tsif_device);
+	tsif_dma_exit(tsif_device);
+	tsif_clock(tsif_device, 0);
+	disable_irq(tsif_device->irq);
+
+	pm_runtime_put(&tsif_device->pdev->dev);
+	wake_unlock(&tsif_device->wake_lock);
+	return 0;
+}
+
+
+static struct {
+	int (*func)(struct msm_tsif_device *);
+	const char *name;
+} actions[] = {
+	{ action_open,  "open"},
+	{ action_close, "close"},
+};
+
+static ssize_t tsif_debugfs_action_write(struct file *filp,
+					 const char __user *userbuf,
+					 size_t count, loff_t *f_pos)
+{
+	int i;
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	char s[40];
+	int len = min(sizeof(s) - 1, count);
+	if (copy_from_user(s, userbuf, len))
+		return -EFAULT;
+	s[len] = '\0';
+	dev_info(&tsif_device->pdev->dev, "%s:%s\n", __func__, s);
+	for (i = 0; i < ARRAY_SIZE(actions); i++) {
+		if (!strncmp(s, actions[i].name,
+		    min(count, strlen(actions[i].name)))) {
+			int rc = actions[i].func(tsif_device);
+			if (!rc)
+				rc = count;
+			return rc;
+		}
+	}
+	return -EINVAL;
+}
+
+static int tsif_debugfs_generic_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations fops_debugfs_action = {
+	.open  = tsif_debugfs_generic_open,
+	.write = tsif_debugfs_action_write,
+};
+
+static ssize_t tsif_debugfs_dma_read(struct file *filp, char __user *userbuf,
+				     size_t count, loff_t *f_pos)
+{
+	static char bufa[200];
+	static char *buf = bufa;
+	int sz = sizeof(bufa);
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	int len = 0;
+	if (tsif_device) {
+		int i;
+		len += snprintf(buf + len, sz - len,
+				"ri %3d | wi %3d | dmwi %3d |",
+				tsif_device->ri, tsif_device->wi,
+				tsif_device->dmwi);
+		for (i = 0; i < 2; i++) {
+			struct tsif_xfer *xfer = &tsif_device->xfer[i];
+			if (xfer->busy) {
+				u32 dst =
+				    tsif_device->dmov_cmd[i]->box.dst_row_addr;
+				u32 base = tsif_device->data_buffer_dma;
+				int w = (dst - base) / TSIF_PKT_SIZE;
+				len += snprintf(buf + len, sz - len,
+						" [%3d]{%3d}",
+						w, xfer->wi);
+			} else {
+				len += snprintf(buf + len, sz - len,
+						" ---idle---");
+			}
+		}
+		len += snprintf(buf + len, sz - len, "\n");
+	} else {
+		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
+	}
+	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
+}
+
+static const struct file_operations fops_debugfs_dma = {
+	.open = tsif_debugfs_generic_open,
+	.read = tsif_debugfs_dma_read,
+};
+
+static ssize_t tsif_debugfs_gpios_read(struct file *filp, char __user *userbuf,
+				       size_t count, loff_t *f_pos)
+{
+	static char bufa[300];
+	static char *buf = bufa;
+	int sz = sizeof(bufa);
+	struct msm_tsif_device *tsif_device = filp->private_data;
+	int len = 0;
+	if (tsif_device) {
+		struct msm_tsif_platform_data *pdata =
+			tsif_device->pdev->dev.platform_data;
+		int i;
+		for (i = 0; i < pdata->num_gpios; i++) {
+			if (pdata->gpios[i].gpio_cfg) {
+				int x = !!gpio_get_value(GPIO_PIN(
+					pdata->gpios[i].gpio_cfg));
+				len += snprintf(buf + len, sz - len,
+						"%15s: %d\n",
+						pdata->gpios[i].label, x);
+			}
+		}
+	} else {
+		len += snprintf(buf + len, sz - len, "No TSIF device???\n");
+	}
+	return simple_read_from_buffer(userbuf, count, f_pos, buf, len);
+}
+
+static const struct file_operations fops_debugfs_gpios = {
+	.open = tsif_debugfs_generic_open,
+	.read = tsif_debugfs_gpios_read,
+};
+
+
+static void tsif_debugfs_init(struct msm_tsif_device *tsif_device)
+{
+	tsif_device->dent_tsif = debugfs_create_dir(
+	      dev_name(&tsif_device->pdev->dev), NULL);
+	if (tsif_device->dent_tsif) {
+		int i;
+		void __iomem *base = tsif_device->base;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+			tsif_device->debugfs_tsif_regs[i] =
+			   debugfs_create_iomem_x32(
+				debugfs_tsif_regs[i].name,
+				debugfs_tsif_regs[i].mode,
+				tsif_device->dent_tsif,
+				base + debugfs_tsif_regs[i].offset);
+		}
+		tsif_device->debugfs_gpio = debugfs_create_file("gpios",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_gpios);
+		tsif_device->debugfs_action = debugfs_create_file("action",
+		    S_IWUSR,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_action);
+		tsif_device->debugfs_dma = debugfs_create_file("dma",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, tsif_device, &fops_debugfs_dma);
+		tsif_device->debugfs_databuf = debugfs_create_blob("data_buf",
+		    S_IRUGO,
+		    tsif_device->dent_tsif, &tsif_device->blob_wrapper_databuf);
+	}
+}
+
+static void tsif_debugfs_exit(struct msm_tsif_device *tsif_device)
+{
+	if (tsif_device->dent_tsif) {
+		int i;
+		debugfs_remove_recursive(tsif_device->dent_tsif);
+		tsif_device->dent_tsif = NULL;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
+			tsif_device->debugfs_tsif_regs[i] = NULL;
+		tsif_device->debugfs_gpio = NULL;
+		tsif_device->debugfs_action = NULL;
+		tsif_device->debugfs_dma = NULL;
+		tsif_device->debugfs_databuf = NULL;
+	}
+}
+/* ===debugfs end=== */
+
+/* ===module begin=== */
+static LIST_HEAD(tsif_devices);
+
+static struct msm_tsif_device *tsif_find_by_id(int id)
+{
+	struct msm_tsif_device *tsif_device;
+	list_for_each_entry(tsif_device, &tsif_devices, devlist) {
+		if (tsif_device->pdev->id == id)
+			return tsif_device;
+	}
+	return NULL;
+}
+
+static int __devinit msm_tsif_probe(struct platform_device *pdev)
+{
+	int rc = -ENODEV;
+	struct msm_tsif_platform_data *plat = pdev->dev.platform_data;
+	struct msm_tsif_device *tsif_device;
+	struct resource *res;
+	/* check device validity */
+	/* must have platform data */
+	if (!plat) {
+		dev_err(&pdev->dev, "Platform data not available\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if ((pdev->id < 0) || (pdev->id > TSIF_MAX_ID)) {
+		dev_err(&pdev->dev, "Invalid device ID %d\n", pdev->id);
+		rc = -EINVAL;
+		goto out;
+	}
+	/* OK, we will use this device */
+	tsif_device = kzalloc(sizeof(struct msm_tsif_device), GFP_KERNEL);
+	if (!tsif_device) {
+		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* cross links */
+	tsif_device->pdev = pdev;
+	platform_set_drvdata(pdev, tsif_device);
+	tsif_device->mode = 1;
+	tsif_device->pkts_per_chunk = TSIF_PKTS_IN_CHUNK_DEFAULT;
+	tsif_device->chunks_per_buf = TSIF_CHUNKS_IN_BUF_DEFAULT;
+	tasklet_init(&tsif_device->dma_refill, tsif_dma_refill,
+		     (unsigned long)tsif_device);
+	if (tsif_get_clocks(tsif_device))
+		goto err_clocks;
+/* map I/O memory */
+	tsif_device->memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!tsif_device->memres) {
+		dev_err(&pdev->dev, "Missing MEM resource\n");
+		rc = -ENXIO;
+		goto err_rgn;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "Missing DMA resource\n");
+		rc = -ENXIO;
+		goto err_rgn;
+	}
+	tsif_device->dma = res->start;
+	tsif_device->crci = res->end;
+	tsif_device->base = ioremap(tsif_device->memres->start,
+				    resource_size(tsif_device->memres));
+	if (!tsif_device->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		goto err_ioremap;
+	}
+	dev_info(&pdev->dev, "remapped phys 0x%08x => virt %p\n",
+		 tsif_device->memres->start, tsif_device->base);
+	rc = tsif_start_gpios(tsif_device);
+	if (rc)
+		goto err_gpio;
+
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	tsif_debugfs_init(tsif_device);
+	rc = platform_get_irq(pdev, 0);
+	if (rc > 0) {
+		tsif_device->irq = rc;
+		rc = request_irq(tsif_device->irq, tsif_irq, IRQF_SHARED,
+				 dev_name(&pdev->dev), tsif_device);
+		disable_irq(tsif_device->irq);
+	}
+	if (rc) {
+		dev_err(&pdev->dev, "failed to request IRQ %d : %d\n",
+			tsif_device->irq, rc);
+		goto err_irq;
+	}
+	rc = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
+	if (rc) {
+		dev_err(&pdev->dev, "failed to create dev. attrs : %d\n", rc);
+		goto err_attrs;
+	}
+	wake_lock_init(&tsif_device->wake_lock, WAKE_LOCK_SUSPEND,
+		       dev_name(&pdev->dev));
+	dev_info(&pdev->dev, "Configured irq %d memory 0x%08x DMA %d CRCI %d\n",
+		 tsif_device->irq, tsif_device->memres->start,
+		 tsif_device->dma, tsif_device->crci);
+	list_add(&tsif_device->devlist, &tsif_devices);
+	return 0;
+/* error path */
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+err_attrs:
+	free_irq(tsif_device->irq, tsif_device);
+err_irq:
+	tsif_debugfs_exit(tsif_device);
+	tsif_stop_gpios(tsif_device);
+err_gpio:
+	iounmap(tsif_device->base);
+err_ioremap:
+err_rgn:
+	tsif_put_clocks(tsif_device);
+err_clocks:
+	kfree(tsif_device);
+out:
+	return rc;
+}
+
+static int __devexit msm_tsif_remove(struct platform_device *pdev)
+{
+	struct msm_tsif_device *tsif_device = platform_get_drvdata(pdev);
+	dev_info(&pdev->dev, "Unload\n");
+	list_del(&tsif_device->devlist);
+	wake_lock_destroy(&tsif_device->wake_lock);
+	sysfs_remove_group(&pdev->dev.kobj, &dev_attr_grp);
+	free_irq(tsif_device->irq, tsif_device);
+	tsif_debugfs_exit(tsif_device);
+	tsif_dma_exit(tsif_device);
+	tsif_stop_gpios(tsif_device);
+	iounmap(tsif_device->base);
+	tsif_put_clocks(tsif_device);
+
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	kfree(tsif_device);
+	return 0;
+}
+
+static int tsif_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...\n");
+	return 0;
+}
+
+static int tsif_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...\n");
+	return 0;
+}
+
+static const struct dev_pm_ops tsif_dev_pm_ops = {
+	.runtime_suspend = tsif_runtime_suspend,
+	.runtime_resume = tsif_runtime_resume,
+};
+
+
+static struct platform_driver msm_tsif_driver = {
+	.probe          = msm_tsif_probe,
+	.remove         = __devexit_p(msm_tsif_remove),
+	.driver         = {
+		.name   = "msm_tsif",
+		.pm     = &tsif_dev_pm_ops,
+	},
+};
+
+static int __init mod_init(void)
+{
+	int rc = platform_driver_register(&msm_tsif_driver);
+	if (rc)
+		pr_err("TSIF: platform_driver_register failed: %d\n", rc);
+	return rc;
+}
+
+static void __exit mod_exit(void)
+{
+	platform_driver_unregister(&msm_tsif_driver);
+}
+/* ===module end=== */
+
+/* public API */
+
+int tsif_get_active(void)
+{
+	struct msm_tsif_device *tsif_device;
+	list_for_each_entry(tsif_device, &tsif_devices, devlist) {
+		return tsif_device->pdev->id;
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL(tsif_get_active);
+
+void *tsif_attach(int id, void (*notify)(void *client_data), void *data)
+{
+	struct msm_tsif_device *tsif_device = tsif_find_by_id(id);
+	if (!tsif_device)
+		return ERR_PTR(-ENODEV);
+	if (tsif_device->client_notify || tsif_device->client_data)
+		return ERR_PTR(-EBUSY);
+	tsif_device->client_notify = notify;
+	tsif_device->client_data = data;
+	/* prevent from unloading */
+	get_device(&tsif_device->pdev->dev);
+	return tsif_device;
+}
+EXPORT_SYMBOL(tsif_attach);
+
+void tsif_detach(void *cookie)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	tsif_device->client_notify = NULL;
+	tsif_device->client_data = NULL;
+	put_device(&tsif_device->pdev->dev);
+}
+EXPORT_SYMBOL(tsif_detach);
+
+void tsif_get_info(void *cookie, void **pdata, int *psize)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (pdata)
+		*pdata = tsif_device->data_buffer;
+	if (psize)
+		*psize = TSIF_PKTS_IN_BUF;
+}
+EXPORT_SYMBOL(tsif_get_info);
+
+int tsif_set_mode(void *cookie, int mode)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (tsif_device->state != tsif_state_stopped) {
+		dev_err(&tsif_device->pdev->dev,
+			"Can't change mode while device is active\n");
+		return -EBUSY;
+	}
+	switch (mode) {
+	case 1:
+	case 2:
+	case 3:
+		tsif_device->mode = mode;
+		break;
+	default:
+		dev_err(&tsif_device->pdev->dev, "Invalid mode: %d\n", mode);
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_mode);
+
+int tsif_set_time_limit(void *cookie, u32 value)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (tsif_device->state != tsif_state_stopped) {
+		dev_err(&tsif_device->pdev->dev,
+			"Can't change time limit while device is active\n");
+		return -EBUSY;
+	}
+	if (value != (value & 0xFFFFFF)) {
+		dev_err(&tsif_device->pdev->dev,
+			"Invalid time limit (should be 24 bit): %#x\n", value);
+		return -EINVAL;
+	}
+	tsif_device->time_limit = value;
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_time_limit);
+
+int tsif_set_buf_config(void *cookie, u32 pkts_in_chunk, u32 chunks_in_buf)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (tsif_device->data_buffer) {
+		dev_err(&tsif_device->pdev->dev,
+			"Data buffer already allocated: %p\n",
+			tsif_device->data_buffer);
+		return -EBUSY;
+	}
+	/* check for crazy user */
+	if (pkts_in_chunk * chunks_in_buf > 10240) {
+		dev_err(&tsif_device->pdev->dev,
+			"Buffer requested is too large: %d * %d\n",
+			pkts_in_chunk,
+			chunks_in_buf);
+		return -EINVAL;
+	}
+	/* parameters are OK, execute */
+	tsif_device->pkts_per_chunk = pkts_in_chunk;
+	tsif_device->chunks_per_buf = chunks_in_buf;
+	return 0;
+}
+EXPORT_SYMBOL(tsif_set_buf_config);
+
+void tsif_get_state(void *cookie, int *ri, int *wi, enum tsif_state *state)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	if (ri)
+		*ri    = tsif_device->ri;
+	if (wi)
+		*wi    = tsif_device->wi;
+	if (state)
+		*state = tsif_device->state;
+}
+EXPORT_SYMBOL(tsif_get_state);
+
+int tsif_start(void *cookie)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	return action_open(tsif_device);
+}
+EXPORT_SYMBOL(tsif_start);
+
+void tsif_stop(void *cookie)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	action_close(tsif_device);
+}
+EXPORT_SYMBOL(tsif_stop);
+
+void tsif_reclaim_packets(void *cookie, int read_index)
+{
+	struct msm_tsif_device *tsif_device = cookie;
+	tsif_device->ri = read_index;
+}
+EXPORT_SYMBOL(tsif_reclaim_packets);
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSIF (Transport Stream Interface)"
+		   " Driver for the MSM chipset");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/misc/tsif_chrdev.c b/drivers/misc/tsif_chrdev.c
new file mode 100644
index 0000000..122c7ed
--- /dev/null
+++ b/drivers/misc/tsif_chrdev.c
@@ -0,0 +1,230 @@
+/**
+ * TSIF driver client
+ *
+ * Character device that, when read,
+ * returns a stream of TSIF packets.
+ *
+ * Copyright (c) 2009-2011, Code Aurora Forum. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>       /* Needed by all modules */
+#include <linux/kernel.h>       /* Needed for KERN_INFO */
+#include <linux/cdev.h>
+#include <linux/err.h>          /* IS_ERR etc. */
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>        /* TASK_INTERRUPTIBLE */
+
+#include <linux/uaccess.h>        /* copy_to_user */
+
+#include <linux/tsif_api.h>
+
+struct tsif_chrdev {
+	struct cdev cdev;
+	struct device *dev;
+	wait_queue_head_t wq_read;
+	void *cookie;
+	/* mirror for tsif data */
+	void *data_buffer;
+	unsigned buf_size_packets; /**< buffer size in packets */
+	unsigned ri, wi;
+	enum tsif_state state;
+	unsigned rptr;
+};
+
+static int tsif_open(struct inode *inode, struct file *file)
+{
+	int rc;
+	struct tsif_chrdev *the_dev =
+	       container_of(inode->i_cdev, struct tsif_chrdev, cdev);
+	if (!the_dev->cookie)  /* not bound yet */
+		return -ENODEV;
+	file->private_data = the_dev;
+	rc = tsif_start(the_dev->cookie);
+	if (rc)
+		return rc;
+	tsif_get_info(the_dev->cookie, &the_dev->data_buffer,
+		      &the_dev->buf_size_packets);
+	the_dev->rptr = 0;
+	return nonseekable_open(inode, file);
+}
+
+static int tsif_release(struct inode *inode, struct file *filp)
+{
+	struct tsif_chrdev *the_dev = filp->private_data;
+	tsif_stop(the_dev->cookie);
+	return 0;
+}
+
+static ssize_t tsif_read(struct file *filp, char __user *buf, size_t count,
+			 loff_t *f_pos)
+{
+	int avail = 0;
+	int wi;
+	struct tsif_chrdev *the_dev = filp->private_data;
+	tsif_get_state(the_dev->cookie, &the_dev->ri, &the_dev->wi,
+		       &the_dev->state);
+	/* consistency check */
+	if (the_dev->ri != (the_dev->rptr / TSIF_PKT_SIZE)) {
+		dev_err(the_dev->dev,
+			"%s: inconsistent read pointers: ri %d rptr %d\n",
+			__func__, the_dev->ri, the_dev->rptr);
+		the_dev->rptr = the_dev->ri * TSIF_PKT_SIZE;
+	}
+	/* ri == wi if no data */
+	if (the_dev->ri == the_dev->wi) {
+		/* shall I block waiting for data? */
+		if (filp->f_flags & O_NONBLOCK) {
+			if (the_dev->state == tsif_state_running) {
+				return -EAGAIN;
+			} else {
+				/* not running -> EOF */
+				return 0;
+			}
+		}
+		if (wait_event_interruptible(the_dev->wq_read,
+		      (the_dev->ri != the_dev->wi) ||
+		      (the_dev->state != tsif_state_running))) {
+			/* got signal -> tell FS to handle it */
+			return -ERESTARTSYS;
+		}
+		if (the_dev->ri == the_dev->wi) {
+			/* still no data -> EOF */
+			return 0;
+		}
+	}
+	/* the contiguous chunk lasts up to wi or to the end of the buffer */
+	wi = (the_dev->wi > the_dev->ri) ?
+		the_dev->wi : the_dev->buf_size_packets;
+	avail = min(wi * TSIF_PKT_SIZE - the_dev->rptr, count);
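+	/*
+	 * Example (illustrative values): with buf_size_packets == 16,
+	 * ri == 14, wi == 3 and rptr == 14 * TSIF_PKT_SIZE, the readable
+	 * chunk runs to the end of the buffer, so at most
+	 * 16 * TSIF_PKT_SIZE - rptr = 2 * TSIF_PKT_SIZE bytes are returned
+	 * now; the wrapped-around packets [0..2] are read on the next call.
+	 */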
+	if (copy_to_user(buf, the_dev->data_buffer + the_dev->rptr, avail))
+		return -EFAULT;
+	the_dev->rptr = (the_dev->rptr + avail) %
+		(TSIF_PKT_SIZE * the_dev->buf_size_packets);
+	the_dev->ri = the_dev->rptr / TSIF_PKT_SIZE;
+	*f_pos += avail;
+	tsif_reclaim_packets(the_dev->cookie, the_dev->ri);
+	return avail;
+}
+
+static void tsif_notify(void *data)
+{
+	struct tsif_chrdev *the_dev = data;
+	tsif_get_state(the_dev->cookie, &the_dev->ri, &the_dev->wi,
+		       &the_dev->state);
+	wake_up_interruptible(&the_dev->wq_read);
+}
+
+static const struct file_operations tsif_fops = {
+	.owner   = THIS_MODULE,
+	.read    = tsif_read,
+	.open    = tsif_open,
+	.release = tsif_release,
+};
+
+static struct class *tsif_class;
+static dev_t tsif_dev;  /**< 1-st dev_t from allocated range */
+static dev_t tsif_dev0; /**< next not yet assigned dev_t */
+
+static int tsif_init_one(struct tsif_chrdev *the_dev, int index)
+{
+	int rc;
+	pr_info("%s[%d]\n", __func__, index);
+	cdev_init(&the_dev->cdev, &tsif_fops);
+	the_dev->cdev.owner = THIS_MODULE;
+	init_waitqueue_head(&the_dev->wq_read);
+	rc = cdev_add(&the_dev->cdev, tsif_dev0++, 1);
+	the_dev->dev = device_create(tsif_class, NULL, the_dev->cdev.dev,
+				     the_dev, "tsif%d", index);
+	if (IS_ERR(the_dev->dev)) {
+		rc = PTR_ERR(the_dev->dev);
+		pr_err("device_create failed: %d\n", rc);
+		goto err_create;
+	}
+	the_dev->cookie = tsif_attach(index, tsif_notify, the_dev);
+	if (IS_ERR(the_dev->cookie)) {
+		rc = PTR_ERR(the_dev->cookie);
+		pr_err("tsif_attach failed: %d\n", rc);
+		goto err_attach;
+	}
+	/* now data buffer is not allocated yet */
+	tsif_get_info(the_dev->cookie, &the_dev->data_buffer, NULL);
+	dev_info(the_dev->dev,
+		 "Device %d.%d attached to TSIF, buffer size %d\n",
+		 MAJOR(the_dev->cdev.dev), MINOR(the_dev->cdev.dev),
+		 the_dev->buf_size_packets);
+	return 0;
+err_attach:
+	device_destroy(tsif_class, the_dev->cdev.dev);
+err_create:
+	cdev_del(&the_dev->cdev);
+	return rc;
+}
+
+static void tsif_exit_one(struct tsif_chrdev *the_dev)
+{
+	dev_info(the_dev->dev, "%s\n", __func__);
+	tsif_detach(the_dev->cookie);
+	device_destroy(tsif_class, the_dev->cdev.dev);
+	cdev_del(&the_dev->cdev);
+}
+
+#define TSIF_NUM_DEVS 1 /**< support this many devices */
+
+static struct tsif_chrdev the_devices[TSIF_NUM_DEVS];
+
+static int __init mod_init(void)
+{
+	int rc;
+	int instance;
+	rc = alloc_chrdev_region(&tsif_dev, 0, TSIF_NUM_DEVS, "tsif");
+	if (rc) {
+		pr_err("alloc_chrdev_region failed: %d\n", rc);
+		goto err_devrgn;
+	}
+	tsif_dev0 = tsif_dev;
+	tsif_class = class_create(THIS_MODULE, "tsif");
+	if (IS_ERR(tsif_class)) {
+		rc = PTR_ERR(tsif_class);
+		pr_err("Error creating tsif class: %d\n", rc);
+		goto err_class;
+	}
+	instance = tsif_get_active();
+	if (instance >= 0)
+		rc = tsif_init_one(&the_devices[0], instance);
+	else
+		rc = instance;
+	if (rc)
+		goto err_init1;
+	return 0;
+err_init1:
+	class_destroy(tsif_class);
+err_class:
+	unregister_chrdev_region(tsif_dev, TSIF_NUM_DEVS);
+err_devrgn:
+	return rc;
+}
+
+static void __exit mod_exit(void)
+{
+	tsif_exit_one(&the_devices[0]);
+	class_destroy(tsif_class);
+	unregister_chrdev_region(tsif_dev, TSIF_NUM_DEVS);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSIF character device interface");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/misc/tspp.c b/drivers/misc/tspp.c
new file mode 100644
index 0000000..81c6b65
--- /dev/null
+++ b/drivers/misc/tspp.c
@@ -0,0 +1,2019 @@
+/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>			/* Needed by all modules */
+#include <linux/kernel.h>			/* Needed for KERN_INFO */
+#include <linux/init.h>				/* Needed for the macros */
+#include <linux/cdev.h>
+#include <linux/err.h>				/* IS_ERR */
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>		/* TASK_INTERRUPTIBLE */
+#include <linux/uaccess.h>        /* copy_to_user */
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>          /* kfree, kzalloc */
+#include <linux/wakelock.h>
+#include <linux/io.h>            /* ioXXX */
+#include <linux/ioport.h>		/* XXX_ mem_region */
+#include <linux/dma-mapping.h>		/* dma_XXX */
+#include <linux/delay.h>		/* msleep */
+#include <linux/clk.h>
+#include <linux/poll.h>				/* poll() file op */
+#include <linux/wait.h>				/* wait() macros, sleeping */
+#include <linux/tspp.h>				/* tspp functions */
+#include <linux/bitops.h>        /* BIT() macro */
+#include <mach/sps.h>				/* BAM stuff */
+#include <mach/gpio.h>
+#include <mach/dma.h>
+#include <mach/msm_tspp.h>
+
+#define TSPP_USE_DEBUGFS
+#ifdef TSPP_USE_DEBUGFS
+#include <linux/debugfs.h>
+#endif /* TSPP_USE_DEBUGFS */
+
+/*
+ * General defines
+ */
+#define TSPP_USE_DMA_ALLOC_COHERENT
+#define TSPP_TSIF_INSTANCES            2
+#define TSPP_FILTER_TABLES             3
+#define TSPP_MAX_DEVICES               3
+#define TSPP_NUM_CHANNELS              16
+#define TSPP_NUM_PRIORITIES            16
+#define TSPP_NUM_KEYS                  8
+#define INVALID_CHANNEL                0xFFFFFFFF
+#define TSPP_SPS_DESCRIPTOR_COUNT      32
+#define TSPP_PACKET_LENGTH             188
+#define TSPP_MIN_BUFFER_SIZE           (TSPP_PACKET_LENGTH)
+#define TSPP_MAX_BUFFER_SIZE           (16 * 1024)	 /* maybe allow 64K? */
+#define TSPP_NUM_BUFFERS               16
+#define TSPP_TSIF_DEFAULT_TIME_LIMIT   60
+#define SPS_DESCRIPTOR_SIZE            8
+#define MIN_ACCEPTABLE_BUFFER_COUNT    2
+#define TSPP_DEBUG(msg...)             pr_info(msg)
+
+/*
+ * TSIF register offsets
+ */
+#define TSIF_STS_CTL_OFF               (0x0)
+#define TSIF_TIME_LIMIT_OFF            (0x4)
+#define TSIF_CLK_REF_OFF               (0x8)
+#define TSIF_LPBK_FLAGS_OFF            (0xc)
+#define TSIF_LPBK_DATA_OFF            (0x10)
+#define TSIF_TEST_CTL_OFF             (0x14)
+#define TSIF_TEST_MODE_OFF            (0x18)
+#define TSIF_TEST_RESET_OFF           (0x1c)
+#define TSIF_TEST_EXPORT_OFF          (0x20)
+#define TSIF_TEST_CURRENT_OFF         (0x24)
+
+#define TSIF_DATA_PORT_OFF            (0x100)
+
+/* bits for TSIF_STS_CTL register */
+#define TSIF_STS_CTL_EN_IRQ       BIT(28)
+#define TSIF_STS_CTL_PACK_AVAIL   BIT(27)
+#define TSIF_STS_CTL_1ST_PACKET   BIT(26)
+#define TSIF_STS_CTL_OVERFLOW     BIT(25)
+#define TSIF_STS_CTL_LOST_SYNC    BIT(24)
+#define TSIF_STS_CTL_TIMEOUT      BIT(23)
+#define TSIF_STS_CTL_INV_SYNC     BIT(21)
+#define TSIF_STS_CTL_INV_NULL     BIT(20)
+#define TSIF_STS_CTL_INV_ERROR    BIT(19)
+#define TSIF_STS_CTL_INV_ENABLE   BIT(18)
+#define TSIF_STS_CTL_INV_DATA     BIT(17)
+#define TSIF_STS_CTL_INV_CLOCK    BIT(16)
+#define TSIF_STS_CTL_SPARE        BIT(15)
+#define TSIF_STS_CTL_EN_NULL      BIT(11)
+#define TSIF_STS_CTL_EN_ERROR     BIT(10)
+#define TSIF_STS_CTL_LAST_BIT     BIT(9)
+#define TSIF_STS_CTL_EN_TIME_LIM  BIT(8)
+#define TSIF_STS_CTL_EN_TCR       BIT(7)
+#define TSIF_STS_CTL_TEST_MODE    BIT(6)
+#define TSIF_STS_CTL_EN_DM        BIT(4)
+#define TSIF_STS_CTL_STOP         BIT(3)
+#define TSIF_STS_CTL_START        BIT(0)
+
+/*
+ * TSPP register offsets
+ */
+#define TSPP_RST					0x00
+#define TSPP_CLK_CONTROL		0x04
+#define TSPP_CONFIG				0x08
+#define TSPP_CONTROL				0x0C
+#define TSPP_PS_DISABLE			0x10
+#define TSPP_MSG_IRQ_STATUS	0x14
+#define TSPP_MSG_IRQ_MASK		0x18
+#define TSPP_IRQ_STATUS			0x1C
+#define TSPP_IRQ_MASK			0x20
+#define TSPP_IRQ_CLEAR			0x24
+#define TSPP_PIPE_ERROR_STATUS(_n)	(0x28 + (_n << 2))
+#define TSPP_STATUS				0x68
+#define TSPP_CURR_TSP_HEADER	0x6C
+#define TSPP_CURR_PID_FILTER	0x70
+#define TSPP_SYSTEM_KEY(_n)	(0x74 + (_n << 2))
+#define TSPP_CBC_INIT_VAL(_n)	(0x94 + (_n << 2))
+#define TSPP_DATA_KEY_RESET	0x9C
+#define TSPP_KEY_VALID			0xA0
+#define TSPP_KEY_ERROR			0xA4
+#define TSPP_TEST_CTRL			0xA8
+#define TSPP_VERSION				0xAC
+#define TSPP_GENERICS			0xB0
+#define TSPP_NOP					0xB4
+
+/*
+ * Register bit definitions
+ */
+/* TSPP_RST */
+#define TSPP_RST_RESET                    BIT(0)
+
+/* TSPP_CLK_CONTROL	*/
+#define TSPP_CLK_CONTROL_FORCE_CRYPTO     BIT(9)
+#define TSPP_CLK_CONTROL_FORCE_PES_PL     BIT(8)
+#define TSPP_CLK_CONTROL_FORCE_PES_AF     BIT(7)
+#define TSPP_CLK_CONTROL_FORCE_RAW_CTRL   BIT(6)
+#define TSPP_CLK_CONTROL_FORCE_PERF_CNT   BIT(5)
+#define TSPP_CLK_CONTROL_FORCE_CTX_SEARCH BIT(4)
+#define TSPP_CLK_CONTROL_FORCE_TSP_PROC   BIT(3)
+#define TSPP_CLK_CONTROL_FORCE_CONS_AHB2MEM BIT(2)
+#define TSPP_CLK_CONTROL_FORCE_TS_AHB2MEM BIT(1)
+#define TSPP_CLK_CONTROL_SET_CLKON        BIT(0)
+
+/* TSPP_CONFIG	*/
+#define TSPP_CONFIG_SET_PACKET_LENGTH(_a, _b) (_a = (_a & ~0xF00) | \
+((_b & 0xF) << 8))
+#define TSPP_CONFIG_GET_PACKET_LENGTH(_a) ((_a >> 8) & 0xF)
+#define TSPP_CONFIG_DUP_WITH_DISC_EN		BIT(7)
+#define TSPP_CONFIG_PES_SYNC_ERROR_MASK   BIT(6)
+#define TSPP_CONFIG_PS_LEN_ERR_MASK       BIT(5)
+#define TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK BIT(4)
+#define TSPP_CONFIG_PS_CONT_ERR_MASK      BIT(3)
+#define TSPP_CONFIG_PS_DUP_TSP_MASK       BIT(2)
+#define TSPP_CONFIG_TSP_ERR_IND_MASK      BIT(1)
+#define TSPP_CONFIG_TSP_SYNC_ERR_MASK     BIT(0)
+
+/* TSPP_CONTROL */
+#define TSPP_CONTROL_PID_FILTER_LOCK      BIT(5)
+#define TSPP_CONTROL_FORCE_KEY_CALC       BIT(4)
+#define TSPP_CONTROL_TSP_CONS_SRC_DIS     BIT(3)
+#define TSPP_CONTROL_TSP_TSIF1_SRC_DIS    BIT(2)
+#define TSPP_CONTROL_TSP_TSIF0_SRC_DIS    BIT(1)
+#define TSPP_CONTROL_PERF_COUNT_INIT      BIT(0)
+
+/* TSPP_MSG_IRQ_STATUS + TSPP_MSG_IRQ_MASK */
+#define TSPP_MSG_TSPP_IRQ                 BIT(2)
+#define TSPP_MSG_TSIF_1_IRQ               BIT(1)
+#define TSPP_MSG_TSIF_0_IRQ               BIT(0)
+
+/* TSPP_IRQ_STATUS + TSPP_IRQ_MASK + TSPP_IRQ_CLEAR */
+#define TSPP_IRQ_STATUS_TSP_RD_CMPL			BIT(19)
+#define TSPP_IRQ_STATUS_KEY_ERROR			BIT(18)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED_BAD	BIT(17)
+#define TSPP_IRQ_STATUS_KEY_SWITCHED		BIT(16)
+#define TSPP_IRQ_STATUS_PS_BROKEN(_n)		BIT((_n))
+
+/* TSPP_PIPE_ERROR_STATUS */
+#define TSPP_PIPE_PES_SYNC_ERROR				BIT(3)
+#define TSPP_PIPE_PS_LENGTH_ERROR			BIT(2)
+#define TSPP_PIPE_PS_CONTINUITY_ERROR		BIT(1)
+#define TSPP_PIP_PS_LOST_START				BIT(0)
+
+/* TSPP_STATUS			*/
+#define TSPP_STATUS_TSP_PKT_AVAIL			BIT(10)
+#define TSPP_STATUS_TSIF1_DM_REQ				BIT(6)
+#define TSPP_STATUS_TSIF0_DM_REQ				BIT(2)
+#define TSPP_CURR_FILTER_TABLE				BIT(0)
+
+/* TSPP_GENERICS		*/
+#define TSPP_GENERICS_CRYPTO_GEN				BIT(12)
+#define TSPP_GENERICS_MAX_CONS_PIPES		BIT(7)
+#define TSPP_GENERICS_MAX_PIPES				BIT(2)
+#define TSPP_GENERICS_TSIF_1_GEN				BIT(1)
+#define TSPP_GENERICS_TSIF_0_GEN				BIT(0)
+
+/*
+ * TSPP memory regions
+ */
+#define TSPP_PID_FILTER_TABLE0      0x800
+#define TSPP_PID_FILTER_TABLE1      0x880
+#define TSPP_PID_FILTER_TABLE2      0x900
+#define TSPP_GLOBAL_PERFORMANCE     0x980 /* see tspp_global_performance */
+#define TSPP_PIPE_CONTEXT           0x990 /* see tspp_pipe_context */
+#define TSPP_PIPE_PERFORMANCE       0x998 /* see tspp_pipe_performance */
+#define TSPP_TSP_BUFF_WORD(_n)      (0xC10 + (_n << 2))
+#define TSPP_DATA_KEY               0xCD0
+
+#ifdef TSPP_USE_DEBUGFS
+struct debugfs_entry {
+	const char *name;
+	mode_t mode;
+	int offset;
+};
+
+static const struct debugfs_entry debugfs_tsif_regs[] = {
+	{"sts_ctl",             S_IRUGO | S_IWUSR, TSIF_STS_CTL_OFF},
+	{"time_limit",          S_IRUGO | S_IWUSR, TSIF_TIME_LIMIT_OFF},
+	{"clk_ref",             S_IRUGO | S_IWUSR, TSIF_CLK_REF_OFF},
+	{"lpbk_flags",          S_IRUGO | S_IWUSR, TSIF_LPBK_FLAGS_OFF},
+	{"lpbk_data",           S_IRUGO | S_IWUSR, TSIF_LPBK_DATA_OFF},
+	{"test_ctl",            S_IRUGO | S_IWUSR, TSIF_TEST_CTL_OFF},
+	{"test_mode",           S_IRUGO | S_IWUSR, TSIF_TEST_MODE_OFF},
+	{"test_reset",                    S_IWUSR, TSIF_TEST_RESET_OFF},
+	{"test_export",         S_IRUGO | S_IWUSR, TSIF_TEST_EXPORT_OFF},
+	{"test_current",        S_IRUGO,           TSIF_TEST_CURRENT_OFF},
+	{"data_port",           S_IRUSR,           TSIF_DATA_PORT_OFF},
+};
+
+static const struct debugfs_entry debugfs_tspp_regs[] = {
+	{"rst",                 S_IRUGO | S_IWUSR, TSPP_RST},
+	{"clk_control",         S_IRUGO | S_IWUSR, TSPP_CLK_CONTROL},
+	{"config",              S_IRUGO | S_IWUSR, TSPP_CONFIG},
+	{"control",             S_IRUGO | S_IWUSR, TSPP_CONTROL},
+	{"ps_disable",          S_IRUGO | S_IWUSR, TSPP_PS_DISABLE},
+	{"msg_irq_status",      S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_STATUS},
+	{"msg_irq_mask",        S_IRUGO | S_IWUSR, TSPP_MSG_IRQ_MASK},
+	{"irq_status",          S_IRUGO | S_IWUSR, TSPP_IRQ_STATUS},
+	{"irq_mask",            S_IRUGO | S_IWUSR, TSPP_IRQ_MASK},
+	{"irq_clear",           S_IRUGO | S_IWUSR, TSPP_IRQ_CLEAR},
+	/* {"pipe_error_status",S_IRUGO | S_IWUSR, TSPP_PIPE_ERROR_STATUS}, */
+	{"status",              S_IRUGO | S_IWUSR, TSPP_STATUS},
+	{"curr_tsp_header",     S_IRUGO | S_IWUSR, TSPP_CURR_TSP_HEADER},
+	{"curr_pid_filter",     S_IRUGO | S_IWUSR, TSPP_CURR_PID_FILTER},
+	/* {"system_key",       S_IRUGO | S_IWUSR, TSPP_SYSTEM_KEY}, */
+	/* {"cbc_init_val",     S_IRUGO | S_IWUSR, TSPP_CBC_INIT_VAL}, */
+	{"data_key_reset",      S_IRUGO | S_IWUSR, TSPP_DATA_KEY_RESET},
+	{"key_valid",           S_IRUGO | S_IWUSR, TSPP_KEY_VALID},
+	{"key_error",           S_IRUGO | S_IWUSR, TSPP_KEY_ERROR},
+	{"test_ctrl",           S_IRUGO | S_IWUSR, TSPP_TEST_CTRL},
+	{"version",             S_IRUGO | S_IWUSR, TSPP_VERSION},
+	{"generics",            S_IRUGO | S_IWUSR, TSPP_GENERICS},
+	{"pid_filter_table0",   S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE0},
+	{"pid_filter_table1",   S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE1},
+	{"pid_filter_table2",   S_IRUGO | S_IWUSR, TSPP_PID_FILTER_TABLE2},
+	{"global_performance",  S_IRUGO | S_IWUSR, TSPP_GLOBAL_PERFORMANCE},
+	{"pipe_context",        S_IRUGO | S_IWUSR, TSPP_PIPE_CONTEXT},
+	{"pipe_performance",    S_IRUGO | S_IWUSR, TSPP_PIPE_PERFORMANCE},
+	{"data_key",            S_IRUGO | S_IWUSR, TSPP_DATA_KEY}
+};
+
+#endif /* TSPP_USE_DEBUGFS */
+
+struct tspp_pid_filter {
+	u32 filter;			/* see FILTER_ macros */
+	u32 config;			/* see FILTER_ macros */
+};
+
+/* tsp_info */
+#define FILTER_HEADER_ERROR_MASK          BIT(7)
+#define FILTER_TRANS_END_DISABLE          BIT(6)
+#define FILTER_DEC_ON_ERROR_EN            BIT(5)
+#define FILTER_DECRYPT                    BIT(4)
+#define FILTER_HAS_ENCRYPTION(_p)         (_p->config & FILTER_DECRYPT)
+#define FILTER_GET_PIPE_NUMBER0(_p)       (_p->config & 0xF)
+#define FILTER_SET_PIPE_NUMBER0(_p, _b)   (_p->config = \
+			(_p->config & ~0xF) | (_b & 0xF))
+#define FILTER_GET_PIPE_PROCESS0(_p)      ((_p->filter >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS0(_p, _b)  (_p->filter = \
+			(_p->filter & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_PIPE_PID(_p)           ((_p->filter >> 13) & 0x1FFF)
+#define FILTER_SET_PIPE_PID(_p, _b)       (_p->filter = \
+			(_p->filter & ~(0x1FFF<<13)) | ((_b & 0x1FFF) << 13))
+#define FILTER_GET_PID_MASK(_p)           (_p->filter & 0x1FFF)
+#define FILTER_SET_PID_MASK(_p, _b)       (_p->filter = \
+			(_p->filter & ~0x1FFF) | (_b & 0x1FFF))
+#define FILTER_GET_PIPE_PROCESS1(_p)      ((_p->config >> 30) & 0x3)
+#define FILTER_SET_PIPE_PROCESS1(_p, _b)  (_p->config = \
+			(_p->config & ~(0x3<<30)) | ((_b & 0x3) << 30))
+#define FILTER_GET_KEY_NUMBER(_p)         ((_p->config >> 8) & 0x7)
+#define FILTER_SET_KEY_NUMBER(_p, _b)     (_p->config = \
+			(_p->config & ~(0x7<<8)) | ((_b & 0x7) << 8))
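+
+/*
+ * Usage sketch (hypothetical values): given struct tspp_pid_filter *f,
+ * FILTER_SET_PIPE_PID(f, 0x100) stores PID 0x100 in bits 13..25 of
+ * f->filter and FILTER_GET_PIPE_PID(f) reads it back; the remaining
+ * FILTER_SET_*()/FILTER_GET_*() pairs pack their fields into f->filter
+ * or f->config in the same way.
+ */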
+
+struct tspp_global_performance_regs {
+	u32 tsp_total;
+	u32 tsp_ignored;
+	u32 tsp_error;
+	u32 tsp_sync;
+};
+
+struct tspp_pipe_context_regs {
+	u16 pes_bytes_left;
+	u16 count;
+	u32 tsif_suffix;
+} __packed;
+#define CONTEXT_GET_STATE(_a)					(_a & 0x3)
+#define CONTEXT_UNSPEC_LENGTH					BIT(11)
+#define CONTEXT_GET_CONT_COUNT(_a)			((_a >> 12) & 0xF)
+
+struct tspp_pipe_performance_regs {
+	u32 tsp_total;
+	u32 ps_duplicate_tsp;
+	u32 tsp_no_payload;
+	u32 tsp_broken_ps;
+	u32 ps_total_num;
+	u32 ps_continuity_error;
+	u32 ps_length_error;
+	u32 pes_sync_error;
+};
+
+struct tspp_tsif_device {
+	void __iomem *base;
+	u32 time_limit;
+	u32 ref_count;
+
+	/* debugfs */
+#ifdef TSPP_USE_DEBUGFS
+	struct dentry *dent_tsif;
+	struct dentry *debugfs_tsif_regs[ARRAY_SIZE(debugfs_tsif_regs)];
+#endif /* TSPP_USE_DEBUGFS */
+};
+
+/* this represents the actual hardware device */
+struct tspp_device {
+	struct platform_device *pdev;
+	void __iomem *base;
+	unsigned int tspp_irq;
+	unsigned int bam_irq;
+	u32 bam_handle;
+	struct sps_bam_props bam_props;
+	struct wake_lock wake_lock;
+	spinlock_t spinlock;
+	struct tasklet_struct tlet;
+	struct tspp_tsif_device tsif[TSPP_TSIF_INSTANCES];
+	/* clocks */
+	struct clk *tsif_pclk;
+	struct clk *tsif_ref_clk;
+
+#ifdef TSPP_USE_DEBUGFS
+	struct dentry *dent;
+	struct dentry *debugfs_regs[ARRAY_SIZE(debugfs_tspp_regs)];
+#endif /* TSPP_USE_DEBUGFS */
+};
+
+enum tspp_buf_state {
+	TSPP_BUF_STATE_EMPTY,	/* buffer has been allocated, but not waiting */
+	TSPP_BUF_STATE_WAITING, /* buffer is waiting to be filled */
+	TSPP_BUF_STATE_DATA     /* buffer is not empty and can be read */
+};
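+
+/*
+ * Expected lifecycle (per the comments above and tspp_sps_complete_tlet()):
+ * EMPTY -> WAITING once a buffer is queued for a transfer, then
+ * WAITING -> DATA when its SPS iovec completes and the data can be read.
+ */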
+
+struct tspp_mem_buffer {
+	struct sps_mem_buffer mem;
+	enum tspp_buf_state state;
+	size_t filled;          /* how much data this buffer is holding */
+	int read_index;         /* where to start reading data from */
+};
+
+/* this represents each char device 'channel' */
+struct tspp_channel {
+	struct cdev cdev;
+	struct device *dd;
+	struct tspp_device *pdev;
+	struct sps_pipe *pipe;
+	struct sps_connect config;
+	struct sps_register_event event;
+	struct tspp_mem_buffer buffer[TSPP_NUM_BUFFERS];
+	wait_queue_head_t in_queue; /* set when data is received */
+	int read;  /* index into mem showing buffers ready to be read by user */
+	int waiting;	/* index into mem showing outstanding transfers */
+	int id;			/* channel id (0-15) */
+	int used;		/* is this channel in use? */
+	int key;			/* which encryption key index is used */
+	u32 bufsize;	/* size of the sps transfer buffers */
+	int buffer_count; /* how many buffers are actually allocated */
+	int filter_count; /* how many filters have been added to this channel */
+	enum tspp_source src;
+	enum tspp_mode mode;
+};
+
+struct tspp_pid_filter_table {
+	struct tspp_pid_filter filter[TSPP_NUM_PRIORITIES];
+};
+
+struct tspp_key_entry {
+	u32 even_lsb;
+	u32 even_msb;
+	u32 odd_lsb;
+	u32 odd_msb;
+};
+
+struct tspp_key_table {
+	struct tspp_key_entry entry[TSPP_NUM_KEYS];
+};
+
+static struct tspp_pid_filter_table *tspp_filter_table[TSPP_FILTER_TABLES];
+static struct tspp_channel channel_list[TSPP_NUM_CHANNELS];
+static struct tspp_key_table *tspp_key_table;
+static struct tspp_global_performance_regs *tspp_global_performance;
+static struct tspp_pipe_context_regs *tspp_pipe_context;
+static struct tspp_pipe_performance_regs *tspp_pipe_performance;
+static struct class *tspp_class;
+static int tspp_key_entry;
+static dev_t tspp_minor;  /* next minor number to assign */
+static int loopback_mode; /* put tsif interfaces into loopback mode */
+
+/*** IRQ ***/
+static irqreturn_t tspp_isr(int irq, void *dev_id)
+{
+	struct tspp_device *device = dev_id;
+	u32 status, mask;
+	u32 data;
+
+	status = readl_relaxed(device->base + TSPP_IRQ_STATUS);
+	mask = readl_relaxed(device->base + TSPP_IRQ_MASK);
+	status &= mask;
+
+	if (!status) {
+		dev_warn(&device->pdev->dev, "Spurious interrupt");
+		return IRQ_NONE;
+	}
+
+	/* if (status & TSPP_IRQ_STATUS_TSP_RD_CMPL) */
+
+	if (status & TSPP_IRQ_STATUS_KEY_ERROR) {
+		/* read the key error info */
+		data = readl_relaxed(device->base + TSPP_KEY_ERROR);
+		dev_info(&device->pdev->dev, "key error 0x%x", data);
+	}
+	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED_BAD) {
+		data = readl_relaxed(device->base + TSPP_KEY_VALID);
+		dev_info(&device->pdev->dev, "key invalidated: 0x%x", data);
+	}
+	if (status & TSPP_IRQ_STATUS_KEY_SWITCHED)
+		dev_info(&device->pdev->dev, "key switched");
+
+	if (status & 0xffff)
+		dev_info(&device->pdev->dev, "broken pipe");
+
+	writel_relaxed(status, device->base + TSPP_IRQ_CLEAR);
+	wmb();
+	return IRQ_HANDLED;
+}
+
+/*** callbacks ***/
+static void tspp_sps_complete_cb(struct sps_event_notify *notify)
+{
+	struct tspp_channel *channel = notify->user;
+	tasklet_schedule(&channel->pdev->tlet);
+}
+
+/*** tasklet ***/
+static void tspp_sps_complete_tlet(unsigned long data)
+{
+	int i;
+	int complete;
+	unsigned long flags;
+	struct sps_iovec iovec;
+	struct tspp_channel *channel;
+	struct tspp_device *device = (struct tspp_device *)data;
+	struct tspp_mem_buffer *buffer;
+
+	spin_lock_irqsave(&device->spinlock, flags);
+
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+		complete = 0;
+		channel = &channel_list[i];
+		buffer = &channel->buffer[channel->waiting];
+
+		/* get completions */
+		if (buffer->state == TSPP_BUF_STATE_WAITING) {
+			if (sps_get_iovec(channel->pipe, &iovec) != 0) {
+				pr_err("tspp: Error in iovec on channel %i",
+					channel->id);
+				break;
+			}
+			if (iovec.size == 0)
+				break;
+
+			if (iovec.addr != buffer->mem.phys_base)
+				pr_err("tspp: buffer mismatch 0x%08x",
+					buffer->mem.phys_base);
+
+			complete = 1;
+			buffer->state = TSPP_BUF_STATE_DATA;
+			buffer->filled = iovec.size;
+			buffer->read_index = 0;
+			channel->waiting++;
+			if (channel->waiting == TSPP_NUM_BUFFERS)
+				channel->waiting = 0;
+		}
+
+		if (complete) {
+			/* wake any waiting processes */
+			wake_up_interruptible(&channel->in_queue);
+		}
+	}
+
+	spin_unlock_irqrestore(&device->spinlock, flags);
+}
+
+/*** GPIO functions ***/
+static void tspp_gpios_free(const struct msm_gpio *table, int size)
+{
+	int i;
+	const struct msm_gpio *g;
+	for (i = size-1; i >= 0; i--) {
+		g = table + i;
+		gpio_free(GPIO_PIN(g->gpio_cfg));
+	}
+}
+
+static int tspp_gpios_request(const struct msm_gpio *table, int size)
+{
+	int rc;
+	int i;
+	const struct msm_gpio *g;
+	for (i = 0; i < size; i++) {
+		g = table + i;
+		rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
+		if (rc) {
+			pr_err("tspp: gpio_request(%d) <%s> failed: %d\n",
+			       GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
+			goto err;
+		}
+	}
+	return 0;
+err:
+	tspp_gpios_free(table, i);
+	return rc;
+}
+
+static int tspp_gpios_disable(const struct msm_gpio *table, int size)
+{
+	int rc = 0;
+	int i;
+	const struct msm_gpio *g;
+	for (i = size-1; i >= 0; i--) {
+		int tmp;
+		g = table + i;
+		tmp = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_DISABLE);
+		if (tmp) {
+			pr_err("tspp: gpio_tlmm_config(0x%08x, GPIO_CFG_DISABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", tmp);
+			pr_err("tspp: pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			if (!rc)
+				rc = tmp;
+		}
+	}
+
+	return rc;
+}
+
+static int tspp_gpios_enable(const struct msm_gpio *table, int size)
+{
+	int rc;
+	int i;
+	const struct msm_gpio *g;
+	for (i = 0; i < size; i++) {
+		g = table + i;
+		rc = gpio_tlmm_config(g->gpio_cfg, GPIO_CFG_ENABLE);
+		if (rc) {
+			pr_err("tspp: gpio_tlmm_config(0x%08x, GPIO_CFG_ENABLE)"
+			       " <%s> failed: %d\n",
+			       g->gpio_cfg, g->label ?: "?", rc);
+			pr_err("tspp: pin %d func %d dir %d pull %d drvstr %d\n",
+			       GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+			       GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+			       GPIO_DRVSTR(g->gpio_cfg));
+			goto err;
+		}
+	}
+	return 0;
+err:
+	tspp_gpios_disable(table, i);
+	return rc;
+}
+
+static int tspp_gpios_request_enable(const struct msm_gpio *table, int size)
+{
+	int rc = tspp_gpios_request(table, size);
+	if (rc)
+		return rc;
+	rc = tspp_gpios_enable(table, size);
+	if (rc)
+		tspp_gpios_free(table, size);
+	return rc;
+}
+
+static void tspp_gpios_disable_free(const struct msm_gpio *table, int size)
+{
+	tspp_gpios_disable(table, size);
+	tspp_gpios_free(table, size);
+}
+
+static int tspp_start_gpios(struct tspp_device *device)
+{
+	struct msm_tspp_platform_data *pdata =
+		device->pdev->dev.platform_data;
+	return tspp_gpios_request_enable(pdata->gpios, pdata->num_gpios);
+}
+
+static void tspp_stop_gpios(struct tspp_device *device)
+{
+	struct msm_tspp_platform_data *pdata =
+		device->pdev->dev.platform_data;
+	tspp_gpios_disable_free(pdata->gpios, pdata->num_gpios);
+}
+
+/*** TSIF functions ***/
+static int tspp_start_tsif(struct tspp_tsif_device *tsif_device)
+{
+	int start_hardware = 0;
+	u32 ctl;
+
+	if (tsif_device->ref_count == 0) {
+		start_hardware = 1;
+	} else if (tsif_device->ref_count > 0) {
+		ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+		if ((ctl & TSIF_STS_CTL_START) != 1) {
+			/* this hardware should already be running */
+			pr_warn("tspp: tsif hw not started but ref count > 0");
+			start_hardware = 1;
+		}
+	}
+
+	if (start_hardware) {
+		if (loopback_mode) {
+			ctl = TSIF_STS_CTL_EN_IRQ |
+				TSIF_STS_CTL_EN_NULL |
+				TSIF_STS_CTL_EN_ERROR |
+				TSIF_STS_CTL_TEST_MODE |
+				TSIF_STS_CTL_EN_DM;
+			TSPP_DEBUG("tspp: starting tsif hw in loopback mode 0x%x",
+				ctl);
+		} else {
+			ctl = TSIF_STS_CTL_EN_IRQ |
+				TSIF_STS_CTL_EN_TIME_LIM |
+				TSIF_STS_CTL_EN_TCR |
+				TSIF_STS_CTL_EN_DM;
+		}
+		writel_relaxed(ctl, tsif_device->base + TSIF_STS_CTL_OFF);
+		writel_relaxed(tsif_device->time_limit,
+			  tsif_device->base + TSIF_TIME_LIMIT_OFF);
+		wmb();
+		writel_relaxed(ctl | TSIF_STS_CTL_START,
+			  tsif_device->base + TSIF_STS_CTL_OFF);
+		wmb();
+		ctl = readl_relaxed(tsif_device->base + TSIF_STS_CTL_OFF);
+	}
+
+	tsif_device->ref_count++;
+
+	return (ctl & TSIF_STS_CTL_START) ? 0 : -EFAULT;
+}
+
+static void tspp_stop_tsif(struct tspp_tsif_device *tsif_device)
+{
+	if (tsif_device->ref_count == 0)
+		return;
+
+	tsif_device->ref_count--;
+
+	if (tsif_device->ref_count == 0) {
+		writel_relaxed(TSIF_STS_CTL_STOP,
+			tsif_device->base + TSIF_STS_CTL_OFF);
+		wmb();
+	}
+}
+
+/*** TSPP functions ***/
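+/*
+ * tspp_key_entry is a bitmap of decryption key slots that are in use:
+ * tspp_get_key_entry() claims the first free slot (or returns -1 when all
+ * TSPP_NUM_KEYS slots are taken) and tspp_free_key_entry() releases one.
+ */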
+static int tspp_get_key_entry(void)
+{
+	int i;
+	for (i = 0; i < TSPP_NUM_KEYS; i++) {
+		if (!(tspp_key_entry & (1 << i))) {
+			tspp_key_entry |= (1 << i);
+			return i;
+		}
+	}
+	return -1;
+}
+
+static void tspp_free_key_entry(int entry)
+{
+	if (entry < 0 || entry >= TSPP_NUM_KEYS) {
+		pr_err("tspp_free_key_entry: index out of bounds");
+		return;
+	}
+
+	tspp_key_entry &= ~(1 << entry);
+}
+
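+/*
+ * Allocate one transfer buffer for a channel.  The requested size is rounded
+ * to a multiple of the packet size implied by the channel mode (192 bytes,
+ * i.e. 188 plus a 4-byte suffix, in RAW mode; 188 bytes in RAW_NO_SUFFIX
+ * mode).  Depending on TSPP_USE_DMA_ALLOC_COHERENT the memory comes from
+ * dma_alloc_coherent() or from kmalloc() followed by a streaming DMA mapping.
+ */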
+static int tspp_alloc_buffer(struct sps_mem_buffer *mem,
+	struct tspp_channel *channel)
+{
+	if (channel->bufsize < TSPP_MIN_BUFFER_SIZE ||
+		channel->bufsize > TSPP_MAX_BUFFER_SIZE) {
+		pr_err("tspp: bad buffer size");
+		return 1;
+	}
+
+	switch (channel->mode) {
+	case TSPP_MODE_DISABLED:
+		mem->size = 0;
+		pr_err("tspp: channel is disabled");
+		return 1;
+
+	case TSPP_MODE_PES:
+		/* give the user what he asks for */
+		mem->size = channel->bufsize;
+		break;
+
+	case TSPP_MODE_RAW:
+		/* must be a multiple of 192 */
+		if (channel->bufsize < (TSPP_PACKET_LENGTH+4))
+			mem->size = (TSPP_PACKET_LENGTH+4);
+		else
+			mem->size = (channel->bufsize /
+				(TSPP_PACKET_LENGTH+4)) *
+				(TSPP_PACKET_LENGTH+4);
+		break;
+
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		/* must be a multiple of 188 */
+		mem->size = (channel->bufsize / TSPP_PACKET_LENGTH) *
+			TSPP_PACKET_LENGTH;
+		break;
+	}
+
+#ifdef TSPP_USE_DMA_ALLOC_COHERENT
+	mem->base = dma_alloc_coherent(NULL, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (mem->base == 0) {
+		pr_err("tspp dma alloc coherent failed %i", mem->size);
+		return -ENOMEM;
+	}
+#else
+	mem->base = kmalloc(mem->size, GFP_KERNEL);
+	if (mem->base == 0) {
+		pr_err("tspp buffer allocation failed %i", mem->size);
+		return -ENOMEM;
+	}
+	mem->phys_base = dma_map_single(NULL,
+		mem->base,
+		mem->size,
+		DMA_FROM_DEVICE);
+#endif
+
+	return 0;
+}
+
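+/*
+ * Bring the whole block back to a known state: stop both TSIF interfaces,
+ * reset the TSPP core and its BAM, clear the PID filter and performance
+ * tables, reprogram the packet length and interrupt mask, and release all
+ * key entries.
+ */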
+static int tspp_global_reset(struct tspp_device *pdev)
+{
+	u32 i, val;
+
+	/* stop all TSIFs */
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++) {
+		pdev->tsif[i].ref_count = 1; /* allows stopping hw */
+		tspp_stop_tsif(&pdev->tsif[i]); /* will reset ref_count to 0 */
+		pdev->tsif[i].time_limit = TSPP_TSIF_DEFAULT_TIME_LIMIT;
+	}
+	writel_relaxed(TSPP_RST_RESET, pdev->base + TSPP_RST);
+	wmb();
+
+	/* BAM */
+	if (sps_device_reset(pdev->bam_handle) != 0) {
+		pr_err("tspp: error resetting bam");
+		return 1;
+	}
+
+	/* TSPP tables */
+	for (i = 0; i < TSPP_FILTER_TABLES; i++)
+		memset(tspp_filter_table[i],
+			0, sizeof(struct tspp_pid_filter_table));
+
+	/* disable all filters */
+	val = (2 << TSPP_NUM_CHANNELS) - 1;
+	writel_relaxed(val, pdev->base + TSPP_PS_DISABLE);
+
+	/* TSPP registers */
+	val = readl_relaxed(pdev->base + TSPP_CONTROL);
+	writel_relaxed(val | TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+		pdev->base + TSPP_CONTROL);
+	wmb();
+	memset(tspp_global_performance, 0,
+		sizeof(struct tspp_global_performance_regs));
+	memset(tspp_pipe_context, 0,
+		sizeof(struct tspp_pipe_context_regs));
+	memset(tspp_pipe_performance, 0,
+		sizeof(struct tspp_pipe_performance_regs));
+	wmb();
+	writel_relaxed(val & ~TSPP_CLK_CONTROL_FORCE_PERF_CNT,
+		pdev->base + TSPP_CONTROL);
+	wmb();
+
+	val = readl_relaxed(pdev->base + TSPP_CONFIG);
+	val &= ~(TSPP_CONFIG_PS_LEN_ERR_MASK |
+			TSPP_CONFIG_PS_CONT_ERR_UNSP_MASK |
+			TSPP_CONFIG_PS_CONT_ERR_MASK);
+	TSPP_CONFIG_SET_PACKET_LENGTH(val, TSPP_PACKET_LENGTH);
+	writel_relaxed(val, pdev->base + TSPP_CONFIG);
+	writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_MASK);
+	writel_relaxed(0x000fffff, pdev->base + TSPP_IRQ_CLEAR);
+	writel_relaxed(0, pdev->base + TSPP_RST);
+	wmb();
+
+	tspp_key_entry = 0;
+
+	return 0;
+}
+
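+/*
+ * Rough call sequence for a client of the exported API below (a sketch
+ * only; the channel pointer and filter come from the caller):
+ *
+ *	tspp_open_channel(channel);
+ *	tspp_open_stream(channel, TSPP_SOURCE_TSIF0);
+ *	tspp_add_filter(channel, &filter);
+ *	... read demultiplexed data through the character device ...
+ *	tspp_remove_filter(channel, &filter);
+ *	tspp_close_stream(channel);
+ *	tspp_close_channel(channel);
+ */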
+int tspp_open_stream(struct tspp_channel *channel, enum tspp_source src)
+{
+	u32 val;
+	struct tspp_device *pdev;
+
+	if (!channel)
+		return 1;
+
+	pdev = channel->pdev;
+
+	switch (src) {
+	case TSPP_SOURCE_TSIF0:
+		/* make sure TSIF0 is running & enabled */
+		if (tspp_start_tsif(&pdev->tsif[0]) != 0) {
+			pr_err("tspp: error starting tsif0");
+			return 1;
+		}
+		val = readl_relaxed(pdev->base + TSPP_CONTROL);
+		writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+			pdev->base + TSPP_CONTROL);
+		wmb();
+		break;
+	case TSPP_SOURCE_TSIF1:
+		/* make sure TSIF1 is running & enabled */
+		if (tspp_start_tsif(&pdev->tsif[1]) != 0) {
+			pr_err("tspp: error starting tsif1");
+			return 1;
+		}
+		val = readl_relaxed(pdev->base + TSPP_CONTROL);
+		writel_relaxed(val & ~TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+			pdev->base + TSPP_CONTROL);
+		wmb();
+		break;
+	case TSPP_SOURCE_MEM:
+		break;
+	default:
+		pr_warn("tspp: channel %i invalid source %i", channel->id, src);
+		return 1;
+	}
+
+	channel->src = src;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_open_stream);
+
+int tspp_close_stream(struct tspp_channel *channel)
+{
+	u32 val;
+	struct tspp_device *pdev;
+
+	pdev = channel->pdev;
+
+	switch (channel->src) {
+	case TSPP_SOURCE_TSIF0:
+		tspp_stop_tsif(&pdev->tsif[0]);
+		val = readl_relaxed(pdev->base + TSPP_CONTROL);
+		writel_relaxed(val | TSPP_CONTROL_TSP_TSIF0_SRC_DIS,
+			pdev->base + TSPP_CONTROL);
+		wmb();
+		break;
+	case TSPP_SOURCE_TSIF1:
+		tspp_stop_tsif(&pdev->tsif[1]);
+		val = readl_relaxed(pdev->base + TSPP_CONTROL);
+		writel_relaxed(val | TSPP_CONTROL_TSP_TSIF1_SRC_DIS,
+			pdev->base + TSPP_CONTROL);
+		break;
+	case TSPP_SOURCE_MEM:
+		break;
+	case TSPP_SOURCE_NONE:
+		break;
+	}
+
+	channel->src = TSPP_SOURCE_NONE;
+	return 0;
+}
+EXPORT_SYMBOL(tspp_close_stream);
+
+int tspp_open_channel(struct tspp_channel *channel)
+{
+	int rc = 0;
+	struct sps_connect *config = &channel->config;
+	struct sps_register_event *event = &channel->event;
+
+	if (channel->used) {
+		pr_err("tspp channel already in use");
+		return 1;
+	}
+
+	/* mark it as used */
+	channel->used = 1;
+
+	/* start the bam  */
+	channel->pipe = sps_alloc_endpoint();
+	if (channel->pipe == 0) {
+		pr_err("tspp: error allocating endpoint");
+		rc = -ENOMEM;
+		goto err_sps_alloc;
+	}
+
+	/* get default configuration */
+	sps_get_config(channel->pipe, config);
+
+	config->source = channel->pdev->bam_handle;
+	config->destination = SPS_DEV_HANDLE_MEM;
+	config->mode = SPS_MODE_SRC;
+	config->options = SPS_O_AUTO_ENABLE |
+		SPS_O_EOT | SPS_O_ACK_TRANSFERS;
+	config->src_pipe_index = channel->id;
+	config->desc.size =
+		(TSPP_SPS_DESCRIPTOR_COUNT + 1) * SPS_DESCRIPTOR_SIZE;
+	config->desc.base = dma_alloc_coherent(NULL,
+						config->desc.size,
+						&config->desc.phys_base,
+						GFP_KERNEL);
+	if (config->desc.base == 0) {
+		pr_err("tspp: error allocating sps descriptors");
+		rc = -ENOMEM;
+		goto err_desc_alloc;
+	}
+
+	memset(config->desc.base, 0, config->desc.size);
+
+	rc = sps_connect(channel->pipe, config);
+	if (rc) {
+		pr_err("tspp: error connecting bam");
+		goto err_connect;
+	}
+
+	event->mode = SPS_TRIGGER_CALLBACK;
+	event->options = SPS_O_EOT;
+	event->callback = tspp_sps_complete_cb;
+	event->xfer_done = NULL;
+	event->user = channel;
+
+	rc = sps_register_event(channel->pipe, event);
+	if (rc) {
+		pr_err("tspp: error registering event");
+		goto err_event;
+	}
+
+	rc = pm_runtime_get(&channel->pdev->pdev->dev);
+	if (rc < 0) {
+		dev_err(&channel->pdev->pdev->dev,
+			"Runtime PM: Unable to wake up tspp device, rc = %d",
+			rc);
+	}
+
+	wake_lock(&channel->pdev->wake_lock);
+	return 0;
+
+err_event:
+	sps_disconnect(channel->pipe);
+err_connect:
+	dma_free_coherent(NULL, config->desc.size, config->desc.base,
+		config->desc.phys_base);
+err_desc_alloc:
+	sps_free_endpoint(channel->pipe);
+err_sps_alloc:
+	return rc;
+}
+EXPORT_SYMBOL(tspp_open_channel);
+
+int tspp_close_channel(struct tspp_channel *channel)
+{
+	int i;
+	int id;
+	u32 val;
+	struct sps_connect *config = &channel->config;
+	struct tspp_device *pdev = channel->pdev;
+
+	TSPP_DEBUG("tspp_close_channel");
+	channel->used = 0;
+
+	/* disable pipe (channel) */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val | (1 << channel->id),
+		pdev->base + TSPP_PS_DISABLE);
+	wmb();
+
+	/* unregister all filters for this channel */
+	for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+		struct tspp_pid_filter *tspp_filter =
+			&tspp_filter_table[channel->src]->filter[i];
+		id = FILTER_GET_PIPE_NUMBER0(tspp_filter);
+		if (id == channel->id) {
+			if (FILTER_HAS_ENCRYPTION(tspp_filter))
+				tspp_free_key_entry(
+					FILTER_GET_KEY_NUMBER(tspp_filter));
+			tspp_filter->config = 0;
+			tspp_filter->filter = 0;
+		}
+	}
+	channel->filter_count = 0;
+
+	/* stop the stream */
+	tspp_close_stream(channel);
+
+	/* disconnect the bam */
+	if (sps_disconnect(channel->pipe) != 0)
+		pr_warn("tspp: Error freeing sps endpoint (%i)", channel->id);
+
+	/* destroy the buffers */
+	dma_free_coherent(NULL, config->desc.size, config->desc.base,
+		config->desc.phys_base);
+
+	for (i = 0; i < TSPP_NUM_BUFFERS; i++) {
+		if (channel->buffer[i].mem.phys_base) {
+#ifdef TSPP_USE_DMA_ALLOC_COHERENT
+			dma_free_coherent(NULL,
+				channel->buffer[i].mem.size,
+				channel->buffer[i].mem.base,
+				channel->buffer[i].mem.phys_base);
+#else
+			dma_unmap_single(NULL,
+				channel->buffer[i].mem.phys_base,
+				channel->buffer[i].mem.size,
+				DMA_FROM_DEVICE);
+			kfree(channel->buffer[i].mem.base);
+#endif
+			channel->buffer[i].mem.phys_base = 0;
+		}
+		channel->buffer[i].mem.base = 0;
+		channel->buffer[i].state = TSPP_BUF_STATE_EMPTY;
+	}
+	channel->buffer_count = 0;
+
+	wake_unlock(&channel->pdev->wake_lock);
+	return 0;
+}
+EXPORT_SYMBOL(tspp_close_channel);
+
+/* picks a stream for this channel */
+int tspp_select_source(struct tspp_channel *channel,
+	struct tspp_select_source *src)
+{
+	/* make sure the requested src id is in bounds */
+	if (src->source > TSPP_SOURCE_MEM) {
+		pr_err("tspp source out of bounds");
+		return 1;
+	}
+
+	/* open the stream, propagating any failure to the caller */
+	return tspp_open_stream(channel, src->source);
+}
+EXPORT_SYMBOL(tspp_select_source);
+
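+/*
+ * Add a PID filter to a channel.  The filter mode must match the channel
+ * mode (the first filter fixes it), the requested priority slot must be
+ * free, and in PES mode a PID may only be routed to a single channel.  The
+ * first successful filter also allocates and queues the channel's transfer
+ * buffers.
+ */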
+int tspp_add_filter(struct tspp_channel *channel,
+	struct tspp_filter *filter)
+{
+	int i;
+	int other_channel;
+	int entry;
+	u32 val, pid, enabled;
+	struct tspp_device *pdev = channel->pdev;
+	struct tspp_pid_filter p;
+
+	TSPP_DEBUG("tspp_add_filter");
+	if (filter->source > TSPP_SOURCE_MEM) {
+		pr_err("tspp: invalid source");
+		return 1;
+	}
+
+	if (filter->priority >= TSPP_NUM_PRIORITIES) {
+		pr_err("tspp: invalid priority");
+		return 1;
+	}
+
+	/* make sure this filter mode matches the channel mode */
+	switch (channel->mode) {
+	case TSPP_MODE_DISABLED:
+		channel->mode = filter->mode;
+		break;
+	case TSPP_MODE_RAW:
+	case TSPP_MODE_PES:
+	case TSPP_MODE_RAW_NO_SUFFIX:
+		if (filter->mode != channel->mode) {
+			pr_err("tspp: wrong filter mode");
+			return 1;
+		}
+	}
+
+	if (filter->mode == TSPP_MODE_PES) {
+		for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+			struct tspp_pid_filter *tspp_filter =
+				&tspp_filter_table[channel->src]->filter[i];
+			pid = FILTER_GET_PIPE_PID((tspp_filter));
+			enabled = FILTER_GET_PIPE_PROCESS0(tspp_filter);
+			if (enabled && (pid == filter->pid)) {
+				other_channel =
+					FILTER_GET_PIPE_NUMBER0(tspp_filter);
+				pr_err("tspp: pid 0x%x already in use by channel %i",
+					filter->pid, other_channel);
+				return 1;
+			}
+		}
+	}
+
+	/* make sure this priority is not already in use */
+	enabled = FILTER_GET_PIPE_PROCESS0(
+		(&(tspp_filter_table[channel->src]->filter[filter->priority])));
+	if (enabled) {
+		pr_err("tspp: filter priority %i source %i is already enabled\n",
+			filter->priority, channel->src);
+		return 1;
+	}
+
+	if (channel->mode == TSPP_MODE_PES) {
+		/* if we are already processing in PES mode, disable pipe
+		(channel) and filter to be updated */
+		val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+		writel_relaxed(val | (1 << channel->id),
+			pdev->base + TSPP_PS_DISABLE);
+		wmb();
+	}
+
+	/* update entry */
+	p.filter = 0;
+	p.config = 0;
+	FILTER_SET_PIPE_PROCESS0((&p), filter->mode);
+	FILTER_SET_PIPE_PID((&p), filter->pid);
+	FILTER_SET_PID_MASK((&p), filter->mask);
+	FILTER_SET_PIPE_NUMBER0((&p), channel->id);
+	FILTER_SET_PIPE_PROCESS1((&p), TSPP_MODE_DISABLED);
+	if (filter->decrypt) {
+		entry = tspp_get_key_entry();
+		if (entry == -1) {
+			pr_err("tspp: no more keys available!");
+		} else {
+			p.config |= FILTER_DECRYPT;
+			FILTER_SET_KEY_NUMBER((&p), entry);
+		}
+	}
+	TSPP_DEBUG("tspp_add_filter: mode=%i pid=%i mask=%i channel=%i",
+		filter->mode, filter->pid, filter->mask, channel->id);
+
+	tspp_filter_table[channel->src]->
+		filter[filter->priority].config = p.config;
+	tspp_filter_table[channel->src]->
+		filter[filter->priority].filter = p.filter;
+
+	/* reenable pipe */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val & ~(1 << channel->id), pdev->base + TSPP_PS_DISABLE);
+	wmb();
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+	/* allocate buffers if needed */
+	if (channel->buffer_count == 0) {
+		TSPP_DEBUG("tspp: no buffers allocated, need %i",
+			TSPP_NUM_BUFFERS);
+		for (i = 0; i < TSPP_NUM_BUFFERS; i++) {
+			if (tspp_alloc_buffer(&channel->buffer[i].mem,
+				channel) != 0) {
+				pr_warn("tspp: Can't allocate buffer %i", i);
+			} else {
+				channel->buffer[i].filled = 0;
+				channel->buffer[i].read_index = 0;
+				channel->buffer_count++;
+
+				/* start the transfer */
+				if (sps_transfer_one(channel->pipe,
+					channel->buffer[i].mem.phys_base,
+					channel->buffer[i].mem.size,
+					channel,
+					SPS_IOVEC_FLAG_INT |
+					SPS_IOVEC_FLAG_EOB))
+					pr_err("tspp: can't submit transfer");
+				else
+					channel->buffer[i].state =
+						TSPP_BUF_STATE_WAITING;
+			}
+		}
+	}
+
+	if (channel->buffer_count < MIN_ACCEPTABLE_BUFFER_COUNT) {
+		pr_err("tspp: failed to allocate at least %i buffers",
+			MIN_ACCEPTABLE_BUFFER_COUNT);
+		return -ENOMEM;
+	}
+
+	channel->filter_count++;
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_add_filter);
+
+int tspp_remove_filter(struct tspp_channel *channel,
+	struct tspp_filter *filter)
+{
+	int entry;
+	u32 val;
+	struct tspp_device *pdev = channel->pdev;
+	int src = channel->src;
+	struct tspp_pid_filter *tspp_filter =
+		&(tspp_filter_table[src]->filter[filter->priority]);
+
+	/* disable pipe (channel) */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val | (1 << channel->id),
+		pdev->base + TSPP_PS_DISABLE);
+	wmb();
+
+	/* update data keys */
+	if (tspp_filter->config & FILTER_DECRYPT) {
+		entry = FILTER_GET_KEY_NUMBER(tspp_filter);
+		tspp_free_key_entry(entry);
+	}
+
+	/* update pid table */
+	tspp_filter->config = 0;
+	tspp_filter->filter = 0;
+
+	channel->filter_count--;
+
+	/* reenable pipe */
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+	writel_relaxed(val & ~(1 << channel->id),
+		pdev->base + TSPP_PS_DISABLE);
+	wmb();
+	val = readl_relaxed(pdev->base + TSPP_PS_DISABLE);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_remove_filter);
+
+int tspp_set_key(struct tspp_channel *channel, struct tspp_key* key)
+{
+	int i;
+	int id;
+	int key_index;
+	int data;
+
+	/* read the key index used by this channel */
+	for (i = 0; i < TSPP_NUM_PRIORITIES; i++) {
+		struct tspp_pid_filter *tspp_filter =
+			&(tspp_filter_table[channel->src]->filter[i]);
+		id = FILTER_GET_PIPE_NUMBER0(tspp_filter);
+		if (id == channel->id) {
+			if (FILTER_HAS_ENCRYPTION(tspp_filter)) {
+				key_index = FILTER_GET_KEY_NUMBER(tspp_filter);
+				break;
+			}
+		}
+	}
+	if (i == TSPP_NUM_PRIORITIES) {
+		pr_err("tspp: no encryption on this channel");
+		return 1;
+	}
+
+	if (key->parity == TSPP_KEY_PARITY_EVEN) {
+		tspp_key_table->entry[key_index].even_lsb = key->lsb;
+		tspp_key_table->entry[key_index].even_msb = key->msb;
+	} else {
+		tspp_key_table->entry[key_index].odd_lsb = key->lsb;
+		tspp_key_table->entry[key_index].odd_msb = key->msb;
+	}
+	data = readl_relaxed(channel->pdev->base + TSPP_KEY_VALID);
+
+	return 0;
+}
+EXPORT_SYMBOL(tspp_set_key);
+
+static int tspp_set_iv(struct tspp_channel *channel, struct tspp_iv *iv)
+{
+	struct tspp_device *pdev = channel->pdev;
+
+	writel_relaxed(iv->data[0], pdev->base + TSPP_CBC_INIT_VAL(0));
+	writel_relaxed(iv->data[1], pdev->base + TSPP_CBC_INIT_VAL(1));
+	return 0;
+}
+
+static int tspp_set_system_keys(struct tspp_channel *channel,
+	struct tspp_system_keys *keys)
+{
+	int i;
+	struct tspp_device *pdev = channel->pdev;
+
+	for (i = 0; i < TSPP_NUM_SYSTEM_KEYS; i++)
+		writel_relaxed(keys->data[i], pdev->base + TSPP_SYSTEM_KEY(i));
+
+	return 0;
+}
+
+static int tspp_set_buffer_size(struct tspp_channel *channel,
+	struct tspp_buffer *buf)
+{
+	if (buf->size < TSPP_MIN_BUFFER_SIZE)
+		channel->bufsize = TSPP_MIN_BUFFER_SIZE;
+	else if (buf->size > TSPP_MAX_BUFFER_SIZE)
+		channel->bufsize = TSPP_MAX_BUFFER_SIZE;
+	else
+		channel->bufsize = buf->size;
+
+	TSPP_DEBUG("tspp channel %i buffer size %i",
+		channel->id, channel->bufsize);
+
+	return 0;
+}
+
+/*** File Operations ***/
+static ssize_t tspp_open(struct inode *inode, struct file *filp)
+{
+	struct tspp_channel *channel;
+	channel = container_of(inode->i_cdev, struct tspp_channel, cdev);
+	filp->private_data = channel;
+
+	/* if this channel is already in use, quit */
+	if (channel->used) {
+		pr_err("tspp channel %i already in use",
+			MINOR(channel->cdev.dev));
+		return -EACCES;
+	}
+
+	if (tspp_open_channel(channel) != 0) {
+		pr_err("tspp: error opening channel");
+		return -EACCES;
+	}
+
+	return 0;
+}
+
+static unsigned int tspp_poll(struct file *filp, struct poll_table_struct *p)
+{
+	unsigned long flags;
+	unsigned int mask = 0;
+	struct tspp_channel *channel;
+	channel = filp->private_data;
+
+	/* register the wait queue for this channel */
+	poll_wait(filp, &channel->in_queue, p);
+
+	spin_lock_irqsave(&channel->pdev->spinlock, flags);
+	if (channel->buffer[channel->read].state == TSPP_BUF_STATE_DATA)
+		mask = POLLIN | POLLRDNORM;
+
+	spin_unlock_irqrestore(&channel->pdev->spinlock, flags);
+
+	return mask;
+}
+
+static ssize_t tspp_release(struct inode *inode, struct file *filp)
+{
+	struct tspp_channel *channel;
+	channel = filp->private_data;
+
+	pr_info("tspp_release");
+	tspp_close_channel(channel);
+
+	return 0;
+}
+
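+/*
+ * read() handler: unless O_NONBLOCK is set, block until the current buffer
+ * holds data, copy as much as the caller asked for to user space, and once
+ * a buffer is fully consumed re-queue it on the BAM pipe and advance to the
+ * next buffer in the ring.
+ */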
+static ssize_t tspp_read(struct file *filp, char __user *buf, size_t count,
+			 loff_t *f_pos)
+{
+	size_t size = 0;
+	size_t transferred = 0;
+	struct tspp_channel *channel;
+	struct tspp_mem_buffer *buffer;
+	channel = filp->private_data;
+
+	TSPP_DEBUG("tspp_read");
+	buffer = &channel->buffer[channel->read];
+	/* see if we have any buffers ready to read */
+	while (buffer->state != TSPP_BUF_STATE_DATA) {
+		if (filp->f_flags & O_NONBLOCK) {
+			pr_warn("tspp: nothing to read on channel %i!",
+				channel->id);
+			return -EAGAIN;
+		}
+		/* go to sleep if there is nothing to read */
+		TSPP_DEBUG("tspp: sleeping");
+		if (wait_event_interruptible(channel->in_queue,
+			(buffer->state == TSPP_BUF_STATE_DATA))) {
+			pr_err("tspp: rude awakening\n");
+			return -ERESTARTSYS;
+		}
+	}
+
+	while (buffer->state == TSPP_BUF_STATE_DATA) {
+		size = min(count, buffer->filled);
+		TSPP_DEBUG("tspp: reading channel %i buffer %i size %i",
+			channel->id, channel->read, size);
+		if (size == 0)
+			break;
+
+#ifndef TSPP_USE_DMA_ALLOC_COHERENT
+		/* unmap buffer (invalidates processor cache) */
+		if (buffer->mem.phys_base) {
+			dma_unmap_single(NULL,
+				buffer->mem.phys_base,
+				buffer->mem.size,
+				DMA_FROM_DEVICE);
+			buffer->mem.phys_base = 0;
+		}
+#endif
+
+		if (copy_to_user(buf, buffer->mem.base +
+			buffer->read_index, size)) {
+			pr_err("tspp: error copying to user buffer");
+			return -EFAULT;
+		}
+		buf += size;
+		count -= size;
+		transferred += size;
+		buffer->read_index += size;
+
+		/* after reading the end of the buffer, requeue it,
+			and set up for reading the next one */
+		if (buffer->read_index ==
+			channel->buffer[channel->read].filled) {
+			buffer->state = TSPP_BUF_STATE_WAITING;
+#ifndef TSPP_USE_DMA_ALLOC_COHERENT
+			buffer->mem.phys_base = dma_map_single(NULL,
+				buffer->mem.base,
+				buffer->mem.size,
+				DMA_FROM_DEVICE);
+			if (!dma_mapping_error(NULL,
+					buffer->mem.phys_base)) {
+#endif
+				if (sps_transfer_one(channel->pipe,
+					buffer->mem.phys_base,
+					buffer->mem.size,
+					channel,
+					SPS_IOVEC_FLAG_INT |
+					SPS_IOVEC_FLAG_EOT))
+					pr_err("tspp: can't submit transfer");
+				else {
+					channel->read++;
+					if (channel->read == TSPP_NUM_BUFFERS)
+						channel->read = 0;
+				}
+#ifndef TSPP_USE_DMA_ALLOC_COHERENT
+			}
+#endif
+		}
+	}
+
+	return transferred;
+}
+
+static long tspp_ioctl(struct file *filp,
+			unsigned int param0, unsigned long param1)
+{
+	int rc = -1;
+	struct tspp_channel *channel;
+	channel = filp->private_data;
+
+	if (!param1)
+		return -EINVAL;
+
+	switch (param0) {
+	case TSPP_IOCTL_SELECT_SOURCE:
+		rc = tspp_select_source(channel,
+			(struct tspp_select_source *)param1);
+		break;
+	case TSPP_IOCTL_ADD_FILTER:
+		rc = tspp_add_filter(channel,
+			(struct tspp_filter *)param1);
+		break;
+	case TSPP_IOCTL_REMOVE_FILTER:
+		rc = tspp_remove_filter(channel,
+			(struct tspp_filter *)param1);
+		break;
+	case TSPP_IOCTL_SET_KEY:
+		rc = tspp_set_key(channel,
+			(struct tspp_key *)param1);
+		break;
+	case TSPP_IOCTL_SET_IV:
+		rc = tspp_set_iv(channel,
+			(struct tspp_iv *)param1);
+		break;
+	case TSPP_IOCTL_SET_SYSTEM_KEYS:
+		rc = tspp_set_system_keys(channel,
+			(struct tspp_system_keys *)param1);
+		break;
+	case TSPP_IOCTL_BUFFER_SIZE:
+		rc = tspp_set_buffer_size(channel,
+			(struct tspp_buffer *)param1);
+		break;
+	case TSPP_IOCTL_LOOPBACK:
+		loopback_mode = param1;
+		rc = 0;
+		break;
+	default:
+		pr_err("tspp: Unknown ioctl %i", param0);
+	}
+
+	/* normalize the return code in case one of the subfunctions does
+		something weird */
+	if (rc != 0)
+		rc = 1;
+
+	return rc;
+}
+
+/*** debugfs ***/
+#ifdef TSPP_USE_DEBUGFS
+static int debugfs_iomem_x32_set(void *data, u64 val)
+{
+	writel_relaxed(val, data);
+	wmb();
+	return 0;
+}
+
+static int debugfs_iomem_x32_get(void *data, u64 *val)
+{
+	*val = readl_relaxed(data);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_iomem_x32, debugfs_iomem_x32_get,
+			debugfs_iomem_x32_set, "0x%08llx");
+
+static void tsif_debugfs_init(struct tspp_tsif_device *tsif_device,
+	int instance)
+{
+	char name[10];
+	snprintf(name, 10, "tsif%i", instance);
+	tsif_device->dent_tsif = debugfs_create_dir(
+	      name, NULL);
+	if (tsif_device->dent_tsif) {
+		int i;
+		void __iomem *base = tsif_device->base;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++) {
+			tsif_device->debugfs_tsif_regs[i] =
+			   debugfs_create_file(
+				debugfs_tsif_regs[i].name,
+				debugfs_tsif_regs[i].mode,
+				tsif_device->dent_tsif,
+				base + debugfs_tsif_regs[i].offset,
+				&fops_iomem_x32);
+		}
+	}
+}
+
+static void tsif_debugfs_exit(struct tspp_tsif_device *tsif_device)
+{
+	if (tsif_device->dent_tsif) {
+		int i;
+		debugfs_remove_recursive(tsif_device->dent_tsif);
+		tsif_device->dent_tsif = NULL;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tsif_regs); i++)
+			tsif_device->debugfs_tsif_regs[i] = NULL;
+	}
+}
+
+static void tspp_debugfs_init(struct tspp_device *device, int instance)
+{
+	char name[10];
+	snprintf(name, 10, "tspp%i", instance);
+	device->dent = debugfs_create_dir(
+	      name, NULL);
+	if (device->dent) {
+		int i;
+		void __iomem *base = device->base;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++) {
+			device->debugfs_regs[i] =
+			   debugfs_create_file(
+				debugfs_tspp_regs[i].name,
+				debugfs_tspp_regs[i].mode,
+				device->dent,
+				base + debugfs_tspp_regs[i].offset,
+				&fops_iomem_x32);
+		}
+	}
+}
+
+static void tspp_debugfs_exit(struct tspp_device *device)
+{
+	if (device->dent) {
+		int i;
+		debugfs_remove_recursive(device->dent);
+		device->dent = NULL;
+		for (i = 0; i < ARRAY_SIZE(debugfs_tspp_regs); i++)
+			device->debugfs_regs[i] = NULL;
+	}
+}
+#endif /* TSPP_USE_DEBUGFS */
+
+static const struct file_operations tspp_fops = {
+	.owner   = THIS_MODULE,
+	.read    = tspp_read,
+	.open    = tspp_open,
+	.poll    = tspp_poll,
+	.release = tspp_release,
+	.unlocked_ioctl   = tspp_ioctl,
+};
+
+static int tspp_channel_init(struct tspp_channel *channel)
+{
+	channel->bufsize = TSPP_MIN_BUFFER_SIZE;
+	channel->read = 0;
+	channel->waiting = 0;
+	cdev_init(&channel->cdev, &tspp_fops);
+	channel->cdev.owner = THIS_MODULE;
+	channel->id = MINOR(tspp_minor);
+	init_waitqueue_head(&channel->in_queue);
+	channel->buffer_count = 0;
+	channel->filter_count = 0;
+
+	if (cdev_add(&channel->cdev, tspp_minor++, 1) != 0) {
+		pr_err("tspp: cdev_add failed");
+		return 1;
+	}
+
+	channel->dd = device_create(tspp_class, NULL, channel->cdev.dev,
+		channel, "tspp%02d", channel->id);
+	if (IS_ERR(channel->dd)) {
+		pr_err("tspp: device_create failed: %i",
+			(int)PTR_ERR(channel->dd));
+		cdev_del(&channel->cdev);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int __devinit msm_tspp_probe(struct platform_device *pdev)
+{
+	int rc = -ENODEV;
+	u32 version;
+	u32 i;
+	struct msm_tspp_platform_data *data;
+	struct tspp_device *device;
+	struct resource *mem_tsif0;
+	struct resource *mem_tsif1;
+	struct resource *mem_tspp;
+	struct resource *mem_bam;
+	struct tspp_channel *channel;
+
+	/* must have platform data */
+	data = pdev->dev.platform_data;
+	if (!data) {
+		pr_err("tspp: Platform data not available");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* check for valid device id */
+	if ((pdev->id < 0) || (pdev->id >= 1)) {
+		pr_err("tspp: Invalid device ID %d", pdev->id);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* OK, we will use this device */
+	device = kzalloc(sizeof(struct tspp_device), GFP_KERNEL);
+	if (!device) {
+		pr_err("tspp: Failed to allocate memory for device");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* set up references */
+	device->pdev = pdev;
+	platform_set_drvdata(pdev, device);
+
+	/* map clocks */
+	if (data->tsif_pclk) {
+		device->tsif_pclk = clk_get(NULL, data->tsif_pclk);
+		if (IS_ERR(device->tsif_pclk)) {
+			pr_err("tspp: failed to get %s",
+				data->tsif_pclk);
+			rc = PTR_ERR(device->tsif_pclk);
+			device->tsif_pclk = NULL;
+			goto err_pclock;
+		}
+	}
+	if (data->tsif_ref_clk) {
+		device->tsif_ref_clk = clk_get(NULL, data->tsif_ref_clk);
+		if (IS_ERR(device->tsif_ref_clk)) {
+			pr_err("tspp: failed to get %s",
+				data->tsif_ref_clk);
+			rc = PTR_ERR(device->tsif_ref_clk);
+			device->tsif_ref_clk = NULL;
+			goto err_refclock;
+		}
+	}
+
+	/* map I/O memory */
+	mem_tsif0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem_tsif0) {
+		pr_err("tspp: Missing tsif0 MEM resource");
+		rc = -ENXIO;
+		goto err_res_tsif0;
+	}
+	device->tsif[0].base = ioremap(mem_tsif0->start,
+		resource_size(mem_tsif0));
+	if (!device->tsif[0].base) {
+		pr_err("tspp: ioremap failed");
+		goto err_map_tsif0;
+	}
+
+	mem_tsif1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!mem_tsif1) {
+		dev_err(&pdev->dev, "Missing tsif1 MEM resource");
+		rc = -ENXIO;
+		goto err_res_tsif1;
+	}
+	device->tsif[1].base = ioremap(mem_tsif1->start,
+		resource_size(mem_tsif1));
+	if (!device->tsif[1].base) {
+		dev_err(&pdev->dev, "ioremap failed");
+		goto err_map_tsif1;
+	}
+
+	mem_tspp = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (!mem_tspp) {
+		dev_err(&pdev->dev, "Missing MEM resource");
+		rc = -ENXIO;
+		goto err_res_dev;
+	}
+	device->base = ioremap(mem_tspp->start, resource_size(mem_tspp));
+	if (!device->base) {
+		dev_err(&pdev->dev, "ioremap failed");
+		goto err_map_dev;
+	}
+
+	mem_bam = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	if (!mem_bam) {
+		pr_err("tspp: Missing bam MEM resource");
+		rc = -ENXIO;
+		goto err_res_bam;
+	}
+	memset(&device->bam_props, 0, sizeof(device->bam_props));
+	device->bam_props.phys_addr = mem_bam->start;
+	device->bam_props.virt_addr = ioremap(mem_bam->start,
+		resource_size(mem_bam));
+	if (!device->bam_props.virt_addr) {
+		dev_err(&pdev->dev, "ioremap failed");
+		goto err_map_bam;
+	}
+
+	/* map TSPP IRQ */
+	rc = platform_get_irq(pdev, 0);
+	if (rc > 0) {
+		device->tspp_irq = rc;
+		rc = request_irq(device->tspp_irq, tspp_isr, IRQF_SHARED,
+				 dev_name(&pdev->dev), device);
+		if (rc) {
+			dev_err(&pdev->dev, "failed to request IRQ %d : %d",
+				device->tspp_irq, rc);
+			goto err_irq;
+		}
+	} else {
+		dev_err(&pdev->dev, "failed to get tspp IRQ");
+		goto err_irq;
+	}
+
+	/* BAM IRQ */
+	device->bam_irq = TSIF_BAM_IRQ;
+
+	/* GPIOs */
+	rc = tspp_start_gpios(device);
+	if (rc)
+		goto err_gpio;
+
+	/* power management */
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+#ifdef TSPP_USE_DEBUGFS
+	tspp_debugfs_init(device, 0);
+
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_init(&device->tsif[i], i);
+#endif /* TSPP_USE_DEBUGFS */
+
+	wake_lock_init(&device->wake_lock, WAKE_LOCK_SUSPEND,
+		dev_name(&pdev->dev));
+
+	/* set up pointers to ram-based 'registers' */
+	tspp_filter_table[0] = TSPP_PID_FILTER_TABLE0 + device->base;
+	tspp_filter_table[1] = TSPP_PID_FILTER_TABLE1 + device->base;
+	tspp_filter_table[2] = TSPP_PID_FILTER_TABLE2 + device->base;
+	tspp_key_table = TSPP_DATA_KEY + device->base;
+	tspp_global_performance = TSPP_GLOBAL_PERFORMANCE + device->base;
+	tspp_pipe_context = TSPP_PIPE_CONTEXT + device->base;
+	tspp_pipe_performance = TSPP_PIPE_PERFORMANCE + device->base;
+
+	device->bam_props.summing_threshold = 0x10;
+	device->bam_props.irq = device->bam_irq;
+	device->bam_props.manage = SPS_BAM_MGR_LOCAL;
+
+	if (sps_register_bam_device(&device->bam_props,
+		&device->bam_handle) != 0) {
+		pr_err("tspp: failed to register bam");
+		rc = -ENODEV;
+		goto err_bam;
+	}
+
+	if (device->tsif_pclk && clk_enable(device->tsif_pclk) != 0) {
+		dev_err(&pdev->dev, "Can't start pclk");
+		rc = -EBUSY;
+		goto err_pclk;
+	}
+	if (device->tsif_ref_clk && clk_enable(device->tsif_ref_clk) != 0) {
+		dev_err(&pdev->dev, "Can't start ref clk");
+		rc = -EBUSY;
+		goto err_refclk;
+	}
+
+	spin_lock_init(&device->spinlock);
+	tasklet_init(&device->tlet, tspp_sps_complete_tlet,
+			(unsigned long)device);
+
+	/* initialize everything to a known state */
+	tspp_global_reset(device);
+
+	version = readl_relaxed(device->base + TSPP_VERSION);
+	if (version != 1)
+		pr_warn("tspp: unrecognized hw version=%i", version);
+
+	/* update the channels with the device */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+		channel = &channel_list[i];
+		channel->pdev = device;
+	}
+
+	/* everything is ok */
+	return 0;
+
+err_refclk:
+	if (device->tsif_pclk)
+		clk_disable(device->tsif_pclk);
+err_pclk:
+	sps_deregister_bam_device(device->bam_handle);
+err_bam:
+#ifdef TSPP_USE_DEBUGFS
+	tspp_debugfs_exit(device);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_exit(&device->tsif[i]);
+#endif /* TSPP_USE_DEBUGFS */
+	tspp_stop_gpios(device);
+err_gpio:
+	free_irq(device->tspp_irq, device);
+err_irq:
+	iounmap(device->bam_props.virt_addr);
+err_map_bam:
+err_res_bam:
+	iounmap(device->base);
+err_map_dev:
+err_res_dev:
+	iounmap(device->tsif[1].base);
+err_map_tsif1:
+err_res_tsif1:
+	iounmap(device->tsif[0].base);
+err_map_tsif0:
+err_res_tsif0:
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+err_refclock:
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+err_pclock:
+	kfree(device);
+
+out:
+	return rc;
+}
+
+static int __devexit msm_tspp_remove(struct platform_device *pdev)
+{
+	u32 i;
+	struct tspp_device *device = platform_get_drvdata(pdev);
+
+	sps_deregister_bam_device(device->bam_handle);
+
+#ifdef TSPP_USE_DEBUGFS
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		tsif_debugfs_exit(&device->tsif[i]);
+#endif /* TSPP_USE_DEBUGFS */
+
+	wake_lock_destroy(&device->wake_lock);
+	free_irq(device->tspp_irq, device);
+	tspp_stop_gpios(device);
+
+	iounmap(device->bam_props.virt_addr);
+	iounmap(device->base);
+	for (i = 0; i < TSPP_TSIF_INSTANCES; i++)
+		iounmap(device->tsif[i].base);
+
+	if (device->tsif_ref_clk)
+		clk_put(device->tsif_ref_clk);
+
+	if (device->tsif_pclk)
+		clk_put(device->tsif_pclk);
+
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put(&pdev->dev);
+	kfree(device);
+
+	return 0;
+}
+
+/*** power management ***/
+
+static int tspp_runtime_suspend(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: suspending...");
+	return 0;
+}
+
+static int tspp_runtime_resume(struct device *dev)
+{
+	dev_dbg(dev, "pm_runtime: resuming...");
+	return 0;
+}
+
+static const struct dev_pm_ops tspp_dev_pm_ops = {
+	.runtime_suspend = tspp_runtime_suspend,
+	.runtime_resume = tspp_runtime_resume,
+};
+
+static struct platform_driver msm_tspp_driver = {
+	.probe          = msm_tspp_probe,
+	.remove         = __devexit_p(msm_tspp_remove),
+	.driver         = {
+		.name   = "msm_tspp",
+		.pm     = &tspp_dev_pm_ops,
+	},
+};
+
+
+static int __init mod_init(void)
+{
+	u32 i;
+	int rc;
+
+	/* first register the driver, and check hardware */
+	rc = platform_driver_register(&msm_tspp_driver);
+	if (rc) {
+		pr_err("tspp: platform_driver_register failed: %d", rc);
+		goto err_register;
+	}
+
+	/* now make the char devs (channels) */
+	rc = alloc_chrdev_region(&tspp_minor, 0, TSPP_NUM_CHANNELS, "tspp");
+	if (rc) {
+		pr_err("tspp: alloc_chrdev_region failed: %d", rc);
+		goto err_devrgn;
+	}
+
+	tspp_class = class_create(THIS_MODULE, "tspp");
+	if (IS_ERR(tspp_class)) {
+		rc = PTR_ERR(tspp_class);
+		pr_err("tspp: Error creating class: %d", rc);
+		goto err_class;
+	}
+
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+		if (tspp_channel_init(&channel_list[i]) != 0) {
+			pr_err("tspp_channel_init failed");
+			break;
+		}
+	}
+
+	return 0;
+
+err_class:
+	unregister_chrdev_region(0, TSPP_NUM_CHANNELS);
+err_devrgn:
+	platform_driver_unregister(&msm_tspp_driver);
+err_register:
+	return rc;
+}
+
+static void __exit mod_exit(void)
+{
+	u32 i;
+	struct tspp_channel *channel;
+
+	/* first delete upper layer interface */
+	for (i = 0; i < TSPP_NUM_CHANNELS; i++) {
+		channel = &channel_list[i];
+		device_destroy(tspp_class, channel->cdev.dev);
+		cdev_del(&channel->cdev);
+	}
+	class_destroy(tspp_class);
+	unregister_chrdev_region(0, TSPP_NUM_CHANNELS);
+
+	/* now delete low level driver */
+	platform_driver_unregister(&msm_tspp_driver);
+}
+
+module_init(mod_init);
+module_exit(mod_exit);
+
+MODULE_DESCRIPTION("TSPP character device interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/tzcom.c b/drivers/misc/tzcom.c
new file mode 100644
index 0000000..3b943c8
--- /dev/null
+++ b/drivers/misc/tzcom.c
@@ -0,0 +1,1248 @@
+/* Qualcomm TrustZone communicator driver
+ *
+ * Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define KMSG_COMPONENT "TZCOM"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/android_pmem.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/tzcom.h>
+#include <linux/clk.h>
+#include <mach/scm.h>
+#include <mach/peripheral-loader.h>
+#include <mach/msm_bus.h>
+#include <mach/msm_bus_board.h>
+#include <mach/socinfo.h>
+#include "tzcomi.h"
+
+#define TZCOM_DEV "tzcom"
+
+#define TZSCHEDULER_CMD_ID 1 /* CMD id of the trustzone scheduler */
+
+#undef PDEBUG
+#define PDEBUG(fmt, args...) pr_debug("%s(%i, %s): " fmt "\n", \
+		__func__, current->pid, current->comm, ## args)
+
+#undef PERR
+#define PERR(fmt, args...) pr_err("%s(%i, %s): " fmt "\n", \
+		__func__, current->pid, current->comm, ## args)
+
+#undef PWARN
+#define PWARN(fmt, args...) pr_warning("%s(%i, %s): " fmt "\n", \
+		__func__, current->pid, current->comm, ## args)
+
+
+static uint32_t tzcom_perf_client;
+static struct class *driver_class;
+static dev_t tzcom_device_no;
+static struct cdev tzcom_cdev;
+struct ion_client *ion_clnt;
+static u8 *sb_in_virt;
+static s32 sb_in_phys;
+static size_t sb_in_length = 20 * SZ_1K;
+static u8 *sb_out_virt;
+static s32 sb_out_phys;
+static size_t sb_out_length = 20 * SZ_1K;
+
+static void *pil;
+
+static atomic_t svc_instance_ctr = ATOMIC_INIT(0);
+static DEFINE_MUTEX(sb_in_lock);
+static DEFINE_MUTEX(sb_out_lock);
+static DEFINE_MUTEX(send_cmd_lock);
+static DEFINE_MUTEX(tzcom_bw_mutex);
+static int tzcom_bw_count;
+static struct clk *tzcom_bus_clk;
+struct tzcom_callback_list {
+	struct list_head      list;
+	struct tzcom_callback callback;
+};
+
+struct tzcom_registered_svc_list {
+	struct list_head                 list;
+	struct tzcom_register_svc_op_req svc;
+	wait_queue_head_t                next_cmd_wq;
+	int                              next_cmd_flag;
+};
+
+struct tzcom_data_t {
+	struct list_head  callback_list_head;
+	struct mutex      callback_list_lock;
+	struct list_head  registered_svc_list_head;
+	spinlock_t        registered_svc_list_lock;
+	wait_queue_head_t cont_cmd_wq;
+	int               cont_cmd_flag;
+	u32               handled_cmd_svc_instance_id;
+	int               abort;
+	wait_queue_head_t abort_wq;
+	atomic_t          ioctl_count;
+};
+
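+/*
+ * Reference-counted bus bandwidth vote: the first caller requests bandwidth
+ * from the bus scaling framework and enables the bus clock; later callers
+ * only bump the count.  tzcom_disable_bus_scaling() reverses this when the
+ * count drops back to zero.
+ */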
+static int tzcom_enable_bus_scaling(void)
+{
+	int ret = 0;
+	if (!tzcom_perf_client)
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(tzcom_bus_clk))
+		return -EINVAL;
+
+	mutex_lock(&tzcom_bw_mutex);
+	if (!tzcom_bw_count) {
+		ret = msm_bus_scale_client_update_request(
+				tzcom_perf_client, 1);
+		if (ret) {
+			pr_err("Bandwidth request failed (%d)\n", ret);
+		} else {
+			ret = clk_enable(tzcom_bus_clk);
+			if (ret)
+				pr_err("Clock enable failed\n");
+		}
+	}
+	if (ret)
+		msm_bus_scale_client_update_request(tzcom_perf_client, 0);
+	else
+		tzcom_bw_count++;
+	mutex_unlock(&tzcom_bw_mutex);
+	return ret;
+}
+
+static void tzcom_disable_bus_scaling(void)
+{
+	if (!tzcom_perf_client)
+		return;
+
+	if (IS_ERR_OR_NULL(tzcom_bus_clk))
+		return;
+
+	mutex_lock(&tzcom_bw_mutex);
+	if (tzcom_bw_count > 0)
+		if (tzcom_bw_count-- == 1) {
+			msm_bus_scale_client_update_request(tzcom_perf_client,
+								0);
+			clk_disable(tzcom_bus_clk);
+		}
+	mutex_unlock(&tzcom_bw_mutex);
+}
+
+static int tzcom_scm_call(const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len)
+{
+	return scm_call(SCM_SVC_TZSCHEDULER, TZSCHEDULER_CMD_ID,
+			cmd_buf, cmd_len, resp_buf, resp_len);
+}
+
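+/*
+ * Addresses that fall inside the statically allocated shared buffers
+ * (SB in / SB out) are translated with simple offset arithmetic; anything
+ * else falls back to virt_to_phys()/phys_to_virt().
+ */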
+static s32 tzcom_virt_to_phys(u8 *virt)
+{
+	if (virt >= sb_in_virt &&
+			virt < (sb_in_virt + sb_in_length)) {
+		return sb_in_phys + (virt - sb_in_virt);
+	} else if (virt >= sb_out_virt &&
+			virt < (sb_out_virt + sb_out_length)) {
+		return sb_out_phys + (virt - sb_out_virt);
+	} else {
+		return virt_to_phys(virt);
+	}
+}
+
+static u8 *tzcom_phys_to_virt(s32 phys)
+{
+	if (phys >= sb_in_phys &&
+			phys < (sb_in_phys + sb_in_length)) {
+		return sb_in_virt + (phys - sb_in_phys);
+	} else if (phys >= sb_out_phys &&
+			phys < (sb_out_phys + sb_out_length)) {
+		return sb_out_virt + (phys - sb_out_phys);
+	} else {
+		return phys_to_virt(phys);
+	}
+}
+
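+/*
+ * A registration is considered unique only if its svc_id is not already
+ * registered and neither end of its [cmd_id_low, cmd_id_high] range falls
+ * inside an already registered range.
+ */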
+static int __tzcom_is_svc_unique(struct tzcom_data_t *data,
+		struct tzcom_register_svc_op_req svc)
+{
+	struct tzcom_registered_svc_list *ptr;
+	int unique = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_for_each_entry(ptr, &data->registered_svc_list_head, list) {
+		if (ptr->svc.svc_id == svc.svc_id) {
+			PERR("Service id: %u is already registered",
+					ptr->svc.svc_id);
+			unique = 0;
+			break;
+		} else if (svc.cmd_id_low >= ptr->svc.cmd_id_low &&
+				svc.cmd_id_low <= ptr->svc.cmd_id_high) {
+			PERR("Cmd id low falls in the range of another "
+					"registered service");
+			unique = 0;
+			break;
+		} else if (svc.cmd_id_high >= ptr->svc.cmd_id_low &&
+				svc.cmd_id_high <= ptr->svc.cmd_id_high) {
+			PERR("Cmd id high falls in the range of another "
+					"registered service");
+			unique = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+	return unique;
+}
+
+static int tzcom_register_service(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret;
+	unsigned long flags;
+	struct tzcom_register_svc_op_req rcvd_svc;
+	struct tzcom_registered_svc_list *new_entry;
+
+	ret = copy_from_user(&rcvd_svc, argp, sizeof(rcvd_svc));
+
+	if (ret) {
+		PERR("copy_from_user failed");
+		return ret;
+	}
+
+	PDEBUG("svc_id: %u, cmd_id_low: %u, cmd_id_high: %u",
+			rcvd_svc.svc_id, rcvd_svc.cmd_id_low,
+			rcvd_svc.cmd_id_high);
+	if (!__tzcom_is_svc_unique(data, rcvd_svc)) {
+		PERR("Provided service is not unique");
+		return -EINVAL;
+	}
+
+	rcvd_svc.instance_id = atomic_inc_return(&svc_instance_ctr);
+
+	ret = copy_to_user(argp, &rcvd_svc, sizeof(rcvd_svc));
+	if (ret) {
+		PERR("copy_to_user failed");
+		return ret;
+	}
+
+	new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry) {
+		PERR("kmalloc failed");
+		return -ENOMEM;
+	}
+	memcpy(&new_entry->svc, &rcvd_svc, sizeof(rcvd_svc));
+	new_entry->next_cmd_flag = 0;
+	init_waitqueue_head(&new_entry->next_cmd_wq);
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_add_tail(&new_entry->list, &data->registered_svc_list_head);
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+
+	return ret;
+}
+
+static int tzcom_unregister_service(struct tzcom_data_t *data,
+		void __user *argp)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct tzcom_unregister_svc_op_req req;
+	struct tzcom_registered_svc_list *ptr, *next;
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PERR("copy_from_user failed");
+		return ret;
+	}
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_for_each_entry_safe(ptr, next, &data->registered_svc_list_head,
+			list) {
+		if (req.svc_id == ptr->svc.svc_id &&
+				req.instance_id == ptr->svc.instance_id) {
+			wake_up_all(&ptr->next_cmd_wq);
+			list_del(&ptr->list);
+			kfree(ptr);
+			spin_unlock_irqrestore(&data->registered_svc_list_lock,
+					flags);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+	return -EINVAL;
+}
+
+static int __tzcom_is_cont_cmd(struct tzcom_data_t *data)
+{
+	int ret;
+	ret = (data->cont_cmd_flag != 0);
+	return ret || data->abort;
+}
+
+/**
+ *   +---------+                              +-----+       +-----------------+
+ *   |  TZCOM  |                              | SCM |       | TZCOM_SCHEDULER |
+ *   +----+----+                              +--+--+       +--------+--------+
+ *        |                                      |                   |
+ *        |        scm_call                      |                   |
+ *        |------------------------------------->|                   |
+ *        |  cmd_buf = struct tzcom_command {    |                   |
+ *        |              cmd_type,               |------------------>|
+ * +------+------------- sb_in_cmd_addr,         |                   |
+ * |      |              sb_in_cmd_len           |                   |
+ * |      |            }                         |                   |
+ * |      |   resp_buf = struct tzcom_response { |                   |
+ * |                         cmd_status,         |                   |
+ * |             +---------- sb_in_rsp_addr,     |                   |
+ * |             |           sb_in_rsp_len       |<------------------|
+ * |             |         }
+ * |             |                            struct tzcom_callback {---------+
+ * |             |                                uint32_t cmd_id;            |
+ * |             |                                uint32_t sb_out_cb_data_len;|
+ * |             +---------------+                uint32_t sb_out_cb_data_off;|
+ * |                             |            }                               |
+ * |    _________________________|_______________________________             |
+ * |    +-----------------------+| +----------------------+                   |
+ * +--->+ copy from req.cmd_buf |+>| copy to req.resp_buf |                   |
+ *      +-----------------------+  +----------------------+                   |
+ *      _________________________________________________________             |
+ *                               INPUT SHARED BUFFER                          |
+ *   +------------------------------------------------------------------------+
+ *   |  _________________________________________________________
+ *   |  +---------------------------------------------+
+ *   +->| cmd_id | data_len | data_off |   data...    |
+ *      +---------------------------------------------+
+ *                                     |<------------>|copy to next_cmd.req_buf
+ *      _________________________________________________________
+ *                              OUTPUT SHARED BUFFER
+ */
+static int __tzcom_send_cmd(struct tzcom_data_t *data,
+			struct tzcom_send_cmd_op_req *req)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 reqd_len_sb_in = 0;
+	u32 reqd_len_sb_out = 0;
+	struct tzcom_command cmd;
+	struct tzcom_response resp;
+	struct tzcom_callback *next_callback;
+	void *cb_data = NULL;
+	struct tzcom_callback_list *new_entry;
+	struct tzcom_callback *cb;
+	size_t new_entry_len = 0;
+	struct tzcom_registered_svc_list *ptr_svc;
+
+	if (req->cmd_buf == NULL || req->resp_buf == NULL) {
+		PERR("cmd buffer or response buffer is null");
+		return -EINVAL;
+	}
+
+	if (req->cmd_len <= 0 || req->resp_len <= 0 ||
+		req->cmd_len > sb_in_length || req->resp_len > sb_in_length) {
+		PERR("cmd buffer length or "
+				"response buffer length not valid");
+		return -EINVAL;
+	}
+	PDEBUG("received cmd_req.req: 0x%p",
+				req->cmd_buf);
+	PDEBUG("received cmd_req.rsp size: %u, ptr: 0x%p",
+			req->resp_len,
+			req->resp_buf);
+
+	reqd_len_sb_in = req->cmd_len + req->resp_len;
+	if (reqd_len_sb_in > sb_in_length) {
+		PDEBUG("Not enough memory to fit cmd_buf and "
+				"resp_buf. Required: %u, Available: %u",
+				reqd_len_sb_in, sb_in_length);
+		return -ENOMEM;
+	}
+
+	/* Copy req->cmd_buf to SB in and set
+	 * req->resp_buf to SB in + cmd_len
+	 */
+	mutex_lock(&sb_in_lock);
+	PDEBUG("Before memcpy on sb_in");
+	memcpy(sb_in_virt, req->cmd_buf, req->cmd_len);
+	PDEBUG("After memcpy on sb_in");
+
+	/* cmd_type will always be a new command here */
+	cmd.cmd_type = TZ_SCHED_CMD_NEW;
+	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
+	cmd.sb_in_cmd_len = req->cmd_len;
+
+	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
+	resp.sb_in_rsp_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt +
+			req->cmd_len);
+	resp.sb_in_rsp_len = req->resp_len;
+
+	PDEBUG("before call tzcom_scm_call, cmd_id = : %u", req->cmd_id);
+	PDEBUG("before call tzcom_scm_call, sizeof(cmd): %zu", sizeof(cmd));
+
+	ret = tzcom_scm_call((const void *) &cmd, sizeof(cmd),
+			&resp, sizeof(resp));
+	mutex_unlock(&sb_in_lock);
+
+	if (ret) {
+		PERR("tzcom_scm_call failed with err: %d", ret);
+		return ret;
+	}
+
+	while (resp.cmd_status != TZ_SCHED_STATUS_COMPLETE) {
+		/*
+		 * If cmd is incomplete, get the callback cmd out from SB out
+		 * and put it on the list
+		 */
+		PDEBUG("cmd_status is incomplete.");
+		next_callback = (struct tzcom_callback *)sb_out_virt;
+
+		mutex_lock(&sb_out_lock);
+		reqd_len_sb_out = sizeof(*next_callback)
+					+ next_callback->sb_out_cb_data_len;
+		if (reqd_len_sb_out > sb_out_length ||
+			reqd_len_sb_out < sizeof(*next_callback) ||
+			next_callback->sb_out_cb_data_len > sb_out_length) {
+			PERR("Incorrect callback data length."
+					" Required: %u, Available: %u, Min: %u",
+					reqd_len_sb_out, sb_out_length,
+					sizeof(*next_callback));
+			mutex_unlock(&sb_out_lock);
+			return -ENOMEM;
+		}
+
+		/* Assumption is cb_data_off is sizeof(tzcom_callback) */
+		new_entry_len = sizeof(*new_entry)
+					+ next_callback->sb_out_cb_data_len;
+		new_entry = kmalloc(new_entry_len, GFP_KERNEL);
+		if (!new_entry) {
+			PERR("kmalloc failed");
+			mutex_unlock(&sb_out_lock);
+			return -ENOMEM;
+		}
+
+		cb = &new_entry->callback;
+		cb->cmd_id = next_callback->cmd_id;
+		cb->sb_out_cb_data_len = next_callback->sb_out_cb_data_len;
+		cb->sb_out_cb_data_off = sizeof(*cb);
+
+		cb_data = (u8 *)next_callback
+				+ next_callback->sb_out_cb_data_off;
+		memcpy((u8 *)cb + cb->sb_out_cb_data_off, cb_data,
+				next_callback->sb_out_cb_data_len);
+		mutex_unlock(&sb_out_lock);
+
+		mutex_lock(&data->callback_list_lock);
+		list_add_tail(&new_entry->list, &data->callback_list_head);
+		mutex_unlock(&data->callback_list_lock);
+
+		/*
+		 * We don't know which service can handle the command. so we
+		 * wake up all blocking services and let them figure out if
+		 * they can handle the given command.
+		 */
+		spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+		list_for_each_entry(ptr_svc,
+				&data->registered_svc_list_head, list) {
+			ptr_svc->next_cmd_flag = 1;
+			wake_up_interruptible(&ptr_svc->next_cmd_wq);
+		}
+		spin_unlock_irqrestore(&data->registered_svc_list_lock,
+				flags);
+
+		PDEBUG("waking up next_cmd_wq and "
+				"waiting for cont_cmd_wq");
+		if (wait_event_interruptible(data->cont_cmd_wq,
+				__tzcom_is_cont_cmd(data))) {
+			PWARN("Interrupted: exiting send_cmd loop");
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			PERR("Aborting driver");
+			return -ENODEV;
+		}
+		data->cont_cmd_flag = 0;
+		cmd.cmd_type = TZ_SCHED_CMD_PENDING;
+		mutex_lock(&sb_in_lock);
+		ret = tzcom_scm_call((const void *) &cmd, sizeof(cmd), &resp,
+				sizeof(resp));
+		mutex_unlock(&sb_in_lock);
+		if (ret) {
+			PERR("tzcom_scm_call failed with err: %d", ret);
+			return ret;
+		}
+	}
+
+	mutex_lock(&sb_in_lock);
+	resp.sb_in_rsp_addr = sb_in_virt + cmd.sb_in_cmd_len;
+	resp.sb_in_rsp_len = req->resp_len;
+	memcpy(req->resp_buf, resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
+	/* Zero out memory for security purpose */
+	memset(sb_in_virt, 0, reqd_len_sb_in);
+	mutex_unlock(&sb_in_lock);
+
+	return ret;
+}
+
+
+static int tzcom_send_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret = 0;
+	struct tzcom_send_cmd_op_req req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PERR("copy_from_user failed");
+		return ret;
+	}
+	ret = __tzcom_send_cmd(data, &req);
+	if (ret)
+		return ret;
+
+	PDEBUG("sending cmd_req->rsp "
+			"size: %u, ptr: 0x%p", req.resp_len,
+			req.resp_buf);
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_to_user failed");
+		return ret;
+	}
+	return ret;
+}
+
+static int __tzcom_send_cmd_req_clean_up(
+			struct tzcom_send_cmd_fd_op_req *req)
+{
+	char *field;
+	uint32_t *update;
+	int ret = 0;
+	int i = 0;
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd != 0) {
+			field = (char *)req->cmd_buf +
+					req->ifd_data[i].cmd_buf_offset;
+			update = (uint32_t *) field;
+			*update = 0;
+		}
+	}
+	return ret;
+}
+
+static int __tzcom_update_with_phy_addr(
+			struct tzcom_send_cmd_fd_op_req *req)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	uint32_t *update;
+	ion_phys_addr_t pa;
+	int ret = 0;
+	int i = 0;
+	uint32_t length;
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd != 0) {
+			/* Get the handle of the shared fd */
+			ihandle = ion_import_fd(ion_clnt, req->ifd_data[i].fd);
+			if (ihandle == NULL) {
+				PERR("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_buf +
+						req->ifd_data[i].cmd_buf_offset;
+			update = (uint32_t *) field;
+
+			/* Populate the cmd data structure with the phys_addr */
+			ret = ion_phys(ion_clnt, ihandle, &pa, &length);
+			if (ret)
+				return -ENOMEM;
+
+			*update = (uint32_t)pa;
+			ion_free(ion_clnt, ihandle);
+		}
+	}
+	return ret;
+}
+
+static int tzcom_send_cmd_with_fd(struct tzcom_data_t *data,
+					void __user *argp)
+{
+	int ret = 0;
+	struct tzcom_send_cmd_fd_op_req req;
+	struct tzcom_send_cmd_op_req send_cmd_req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PERR("copy_from_user failed");
+		return ret;
+	}
+
+	send_cmd_req.cmd_id = req.cmd_id;
+	send_cmd_req.cmd_buf = req.cmd_buf;
+	send_cmd_req.cmd_len = req.cmd_len;
+	send_cmd_req.resp_buf = req.resp_buf;
+	send_cmd_req.resp_len = req.resp_len;
+
+	ret = __tzcom_update_with_phy_addr(&req);
+	if (ret)
+		return ret;
+	ret = __tzcom_send_cmd(data, &send_cmd_req);
+	__tzcom_send_cmd_req_clean_up(&req);
+
+	if (ret)
+		return ret;
+
+	PDEBUG("sending cmd_req->rsp "
+			"size: %u, ptr: 0x%p", req.resp_len,
+			req.resp_buf);
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		PDEBUG("copy_to_user failed");
+		return ret;
+	}
+	return ret;
+}
+
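+/* Look up a registered service entry by its instance id. */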
+static struct tzcom_registered_svc_list *__tzcom_find_svc(
+		struct tzcom_data_t *data,
+		uint32_t instance_id)
+{
+	struct tzcom_registered_svc_list *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	list_for_each_entry(entry,
+			&data->registered_svc_list_head, list) {
+		if (entry->svc.instance_id == instance_id)
+			break;
+	}
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+	return entry;
+}
+
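+/*
+ * Scan the pending callback list for an entry whose cmd_id falls inside
+ * this service's registered [cmd_id_low, cmd_id_high] range.  On a match
+ * the callback data is copied out to the caller's buffer and the entry is
+ * freed; -EAGAIN is returned when nothing matches so the caller keeps
+ * waiting.
+ */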
+static int __tzcom_copy_cmd(struct tzcom_data_t *data,
+		struct tzcom_next_cmd_op_req *req,
+		struct tzcom_registered_svc_list *ptr_svc)
+{
+	int found = 0;
+	int ret = -EAGAIN;
+	struct tzcom_callback_list *entry, *next;
+	struct tzcom_callback *cb;
+
+	PDEBUG("In here");
+	mutex_lock(&data->callback_list_lock);
+	PDEBUG("Before looping through cmd and svc lists.");
+	list_for_each_entry_safe(entry, next, &data->callback_list_head, list) {
+		cb = &entry->callback;
+		if (req->svc_id == ptr_svc->svc.svc_id &&
+			req->instance_id == ptr_svc->svc.instance_id &&
+			cb->cmd_id >= ptr_svc->svc.cmd_id_low &&
+			cb->cmd_id <= ptr_svc->svc.cmd_id_high) {
+			PDEBUG("Found matching entry");
+			found = 1;
+			if (cb->sb_out_cb_data_len <= req->req_len) {
+				PDEBUG("copying cmd buffer %p to req "
+					"buffer %p, length: %u",
+					(u8 *)cb + cb->sb_out_cb_data_off,
+					req->req_buf, cb->sb_out_cb_data_len);
+				req->cmd_id = cb->cmd_id;
+				ret = copy_to_user(req->req_buf,
+					(u8 *)cb + cb->sb_out_cb_data_off,
+					cb->sb_out_cb_data_len);
+				if (ret) {
+					PERR("copy_to_user failed");
+					break;
+				}
+				list_del(&entry->list);
+				kfree(entry);
+				ret = 0;
+			} else {
+				PERR("callback data buffer is "
+					"larger than the provided buffer. "
+					"Required: %u, Provided: %u",
+					cb->sb_out_cb_data_len,
+					req->req_len);
+				ret = -ENOMEM;
+			}
+			break;
+		}
+	}
+	PDEBUG("After looping through cmd and svc lists.");
+	mutex_unlock(&data->callback_list_lock);
+	return ret;
+}
+
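+/*
+ * Wake-up condition for tzcom_read_next_cmd(): true once a new callback
+ * command has been flagged for this service or the client is aborting.
+ */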
+static int __tzcom_is_next_cmd(struct tzcom_data_t *data,
+		struct tzcom_registered_svc_list *svc)
+{
+	int ret;
+	ret = (svc->next_cmd_flag != 0);
+	return ret || data->abort;
+}
+
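+/*
+ * TZCOM_IOCTL_READ_NEXT_CMD_REQ handler: block the calling service
+ * instance until a callback command in its registered cmd_id range is
+ * available, then copy that command out to user space.
+ */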
+static int tzcom_read_next_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret = 0;
+	struct tzcom_next_cmd_op_req req;
+	struct tzcom_registered_svc_list *this_svc;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PERR("copy_from_user failed");
+		return ret;
+	}
+
+	if (req.instance_id > atomic_read(&svc_instance_ctr)) {
+		PERR("Invalid instance_id for the request");
+		return -EINVAL;
+	}
+
+	if (!req.req_buf || req.req_len == 0) {
+		PERR("Invalid request buffer or buffer length");
+		return -EINVAL;
+	}
+
+	PDEBUG("Before next_cmd loop");
+	this_svc = __tzcom_find_svc(data, req.instance_id);
+
+	while (1) {
+		PDEBUG("Before wait_event next_cmd.");
+		if (wait_event_interruptible(this_svc->next_cmd_wq,
+				__tzcom_is_next_cmd(data, this_svc))) {
+			PWARN("Interrupted: exiting wait_next_cmd loop");
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			PERR("Aborting driver");
+			return -ENODEV;
+		}
+		PDEBUG("After wait_event next_cmd.");
+		this_svc->next_cmd_flag = 0;
+
+		ret = __tzcom_copy_cmd(data, &req, this_svc);
+		if (ret == 0) {
+			PDEBUG("Successfully found svc for cmd");
+			data->handled_cmd_svc_instance_id = req.instance_id;
+			break;
+		} else if (ret == -ENOMEM) {
+			PERR("Not enough memory");
+			return ret;
+		}
+	}
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		PERR("copy_to_user failed");
+		return ret;
+	}
+	PDEBUG("copy_to_user is done.");
+	return ret;
+}
+
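+/*
+ * TZCOM_IOCTL_CONTINUE_CMD_REQ handler: copy the service's response into
+ * sb_out and wake the thread blocked in __tzcom_send_cmd() so the pending
+ * TZ command can be resumed.
+ */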
+static int tzcom_cont_cmd(struct tzcom_data_t *data, void __user *argp)
+{
+	int ret = 0;
+	struct tzcom_cont_cmd_op_req req;
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		PERR("copy_from_user failed");
+		return ret;
+	}
+
+	/*
+	 * Only the svc instance that handled the cmd (in read_next_cmd method)
+	 * can call continue cmd
+	 */
+	if (data->handled_cmd_svc_instance_id != req.instance_id) {
+		PWARN("Only the service instance that handled the last "
+				"callback can continue cmd. "
+				"Expected: %u, Received: %u",
+				data->handled_cmd_svc_instance_id,
+				req.instance_id);
+		return -EINVAL;
+	}
+
+	if (req.resp_buf) {
+		mutex_lock(&sb_out_lock);
+		memcpy(sb_out_virt, req.resp_buf, req.resp_len);
+		mutex_unlock(&sb_out_lock);
+	}
+
+	data->cont_cmd_flag = 1;
+	wake_up_interruptible(&data->cont_cmd_wq);
+	return ret;
+}
+
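+/*
+ * Flag the client as aborting, wake every waiter (cont_cmd_wq and each
+ * registered service's next_cmd_wq) and wait for all in-flight ioctls to
+ * drain.  Used by TZCOM_IOCTL_ABORT_REQ and by tzcom_release().
+ */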
+static int tzcom_abort(struct tzcom_data_t *data)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct tzcom_registered_svc_list *lsvc, *nsvc;
+	if (data->abort) {
+		PERR("Already aborting");
+		return -EINVAL;
+	}
+
+	data->abort = 1;
+
+	PDEBUG("Waking up cont_cmd_wq");
+	wake_up_all(&data->cont_cmd_wq);
+
+	spin_lock_irqsave(&data->registered_svc_list_lock, flags);
+	PDEBUG("Before waking up service wait queues");
+	list_for_each_entry_safe(lsvc, nsvc,
+			&data->registered_svc_list_head, list) {
+		wake_up_all(&lsvc->next_cmd_wq);
+	}
+	spin_unlock_irqrestore(&data->registered_svc_list_lock, flags);
+
+	PDEBUG("ioctl_count before loop: %d", atomic_read(&data->ioctl_count));
+	while (atomic_read(&data->ioctl_count) > 0) {
+		if (wait_event_interruptible(data->abort_wq,
+				atomic_read(&data->ioctl_count) <= 0)) {
+			PERR("Interrupted from abort");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	return ret;
+}
+
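+/*
+ * Main ioctl dispatcher.  Each handler is bracketed by ioctl_count
+ * accounting and an abort_wq wake-up so that tzcom_abort() can wait for
+ * in-flight requests; the two send-cmd paths are additionally serialized
+ * by send_cmd_lock.
+ */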
+static long tzcom_ioctl(struct file *file, unsigned cmd,
+		unsigned long arg)
+{
+	int ret = 0;
+	struct tzcom_data_t *tzcom_data = file->private_data;
+	void __user *argp = (void __user *) arg;
+	PDEBUG("enter tzcom_ioctl()");
+	if (tzcom_data->abort) {
+		PERR("Aborting tzcom driver");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case TZCOM_IOCTL_REGISTER_SERVICE_REQ: {
+		PDEBUG("ioctl register_service_req()");
+		atomic_inc(&tzcom_data->ioctl_count);
+		ret = tzcom_register_service(tzcom_data, argp);
+		atomic_dec(&tzcom_data->ioctl_count);
+		wake_up_interruptible(&tzcom_data->abort_wq);
+		if (ret)
+			PERR("failed tzcom_register_service: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_UNREGISTER_SERVICE_REQ: {
+		PDEBUG("ioctl unregister_service_req()");
+		atomic_inc(&tzcom_data->ioctl_count);
+		ret = tzcom_unregister_service(tzcom_data, argp);
+		atomic_dec(&tzcom_data->ioctl_count);
+		wake_up_interruptible(&tzcom_data->abort_wq);
+		if (ret)
+			PERR("failed tzcom_unregister_service: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_SEND_CMD_REQ: {
+		PDEBUG("ioctl send_cmd_req()");
+		/* Only one client allowed here at a time */
+		mutex_lock(&send_cmd_lock);
+		atomic_inc(&tzcom_data->ioctl_count);
+		ret = tzcom_send_cmd(tzcom_data, argp);
+		atomic_dec(&tzcom_data->ioctl_count);
+		wake_up_interruptible(&tzcom_data->abort_wq);
+		mutex_unlock(&send_cmd_lock);
+		if (ret)
+			PERR("failed tzcom_send_cmd: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_SEND_CMD_FD_REQ: {
+		PDEBUG("ioctl send_cmd_fd_req()");
+		/* Only one client allowed here at a time */
+		mutex_lock(&send_cmd_lock);
+		atomic_inc(&tzcom_data->ioctl_count);
+		ret = tzcom_send_cmd_with_fd(tzcom_data, argp);
+		atomic_dec(&tzcom_data->ioctl_count);
+		wake_up_interruptible(&tzcom_data->abort_wq);
+		mutex_unlock(&send_cmd_lock);
+		if (ret)
+			PERR("failed tzcom_send_cmd_with_fd: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_READ_NEXT_CMD_REQ: {
+		PDEBUG("ioctl read_next_cmd_req()");
+		atomic_inc(&tzcom_data->ioctl_count);
+		ret = tzcom_read_next_cmd(tzcom_data, argp);
+		atomic_dec(&tzcom_data->ioctl_count);
+		wake_up_interruptible(&tzcom_data->abort_wq);
+		if (ret)
+			PERR("failed tzcom_read_next: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_CONTINUE_CMD_REQ: {
+		PDEBUG("ioctl continue_cmd_req()");
+		atomic_inc(&tzcom_data->ioctl_count);
+		ret = tzcom_cont_cmd(tzcom_data, argp);
+		atomic_dec(&tzcom_data->ioctl_count);
+		wake_up_interruptible(&tzcom_data->abort_wq);
+		if (ret)
+			PERR("failed tzcom_cont_cmd: %d", ret);
+		break;
+	}
+	case TZCOM_IOCTL_ABORT_REQ: {
+		PDEBUG("ioctl abort_req()");
+		ret = tzcom_abort(tzcom_data);
+		if (ret)
+			PERR("failed tzcom_abort: %d", ret);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return ret;
+}
+
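+/*
+ * open(): load the "tzapps" PIL image on first use and issue a one-time
+ * TZ_SCHED_CMD_ID_INIT_SB_OUT command through sb_in so that TZ learns the
+ * physical address and length of the shared sb_out buffer, then allocate
+ * the per-client state.
+ */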
+static int tzcom_open(struct inode *inode, struct file *file)
+{
+	int ret;
+	long pil_error;
+	struct tz_pr_init_sb_req_s sb_out_init_req;
+	struct tz_pr_init_sb_rsp_s sb_out_init_rsp;
+	void *rsp_addr_virt;
+	struct tzcom_command cmd;
+	struct tzcom_response resp;
+	struct tzcom_data_t *tzcom_data;
+
+	PDEBUG("In here");
+
+	ret = tzcom_enable_bus_scaling();
+
+	if (pil == NULL) {
+		pil = pil_get("tzapps");
+		if (IS_ERR(pil)) {
+			PERR("Playready PIL image load failed");
+			pil_error = PTR_ERR(pil);
+			pil = NULL;
+			return pil_error;
+		}
+		PDEBUG("tzapps image loaded successfully");
+	}
+
+	sb_out_init_req.pr_cmd = TZ_SCHED_CMD_ID_INIT_SB_OUT;
+	sb_out_init_req.sb_len = sb_out_length;
+	sb_out_init_req.sb_ptr = tzcom_virt_to_phys(sb_out_virt);
+	PDEBUG("sb_out_init_req { pr_cmd: %d, sb_len: %u, "
+			"sb_ptr (phys): 0x%x }",
+			sb_out_init_req.pr_cmd,
+			sb_out_init_req.sb_len,
+			sb_out_init_req.sb_ptr);
+
+	mutex_lock(&sb_in_lock);
+	PDEBUG("Before memcpy on sb_in");
+	memcpy(sb_in_virt, &sb_out_init_req, sizeof(sb_out_init_req));
+	PDEBUG("After memcpy on sb_in");
+
+	/* It will always be a new cmd from this method */
+	cmd.cmd_type = TZ_SCHED_CMD_NEW;
+	cmd.sb_in_cmd_addr = (u8 *) tzcom_virt_to_phys(sb_in_virt);
+	cmd.sb_in_cmd_len = sizeof(sb_out_init_req);
+	PDEBUG("tzcom_command { cmd_type: %u, sb_in_cmd_addr: %p, "
+			"sb_in_cmd_len: %u }",
+			cmd.cmd_type, cmd.sb_in_cmd_addr, cmd.sb_in_cmd_len);
+
+	resp.cmd_status = TZ_SCHED_STATUS_INCOMPLETE;
+
+	PDEBUG("Before scm_call for sb_init");
+	ret = tzcom_scm_call(&cmd, sizeof(cmd), &resp, sizeof(resp));
+	if (ret) {
+		PERR("tzcom_scm_call failed with err: %d", ret);
+		return ret;
+	}
+	PDEBUG("After scm_call for sb_init");
+
+	PDEBUG("tzcom_response after scm cmd_status: %u", resp.cmd_status);
+	if (resp.cmd_status == TZ_SCHED_STATUS_COMPLETE) {
+		resp.sb_in_rsp_addr = (u8 *)cmd.sb_in_cmd_addr +
+				cmd.sb_in_cmd_len;
+		resp.sb_in_rsp_len = sizeof(sb_out_init_rsp);
+		PDEBUG("tzcom_response sb_in_rsp_addr: %p, sb_in_rsp_len: %u",
+				resp.sb_in_rsp_addr, resp.sb_in_rsp_len);
+		rsp_addr_virt = tzcom_phys_to_virt((unsigned long)
+				resp.sb_in_rsp_addr);
+		PDEBUG("Received response phys: %p, virt: %p",
+				resp.sb_in_rsp_addr, rsp_addr_virt);
+		memcpy(&sb_out_init_rsp, rsp_addr_virt, resp.sb_in_rsp_len);
+	} else {
+		PERR("Error with SB initialization");
+		mutex_unlock(&sb_in_lock);
+		return -EPERM;
+	}
+	mutex_unlock(&sb_in_lock);
+
+	PDEBUG("sb_out_init_rsp { pr_cmd: %d, ret: %d }",
+			sb_out_init_rsp.pr_cmd, sb_out_init_rsp.ret);
+
+	if (sb_out_init_rsp.ret) {
+		PERR("sb_out_init_req failed: %d", sb_out_init_rsp.ret);
+		return -EPERM;
+	}
+
+	tzcom_data = kmalloc(sizeof(*tzcom_data), GFP_KERNEL);
+	if (!tzcom_data) {
+		PERR("kmalloc failed");
+		return -ENOMEM;
+	}
+	file->private_data = tzcom_data;
+
+	INIT_LIST_HEAD(&tzcom_data->callback_list_head);
+	mutex_init(&tzcom_data->callback_list_lock);
+
+	INIT_LIST_HEAD(&tzcom_data->registered_svc_list_head);
+	spin_lock_init(&tzcom_data->registered_svc_list_lock);
+
+	init_waitqueue_head(&tzcom_data->cont_cmd_wq);
+	tzcom_data->cont_cmd_flag = 0;
+	tzcom_data->handled_cmd_svc_instance_id = 0;
+	tzcom_data->abort = 0;
+	init_waitqueue_head(&tzcom_data->abort_wq);
+	atomic_set(&tzcom_data->ioctl_count, 0);
+	return 0;
+}
+
+static int tzcom_release(struct inode *inode, struct file *file)
+{
+	struct tzcom_data_t *tzcom_data = file->private_data;
+	struct tzcom_callback_list *lcb, *ncb;
+	struct tzcom_registered_svc_list *lsvc, *nsvc;
+	unsigned long flags;
+	PDEBUG("In here");
+
+	if (!tzcom_data->abort) {
+		PDEBUG("Calling abort");
+		tzcom_abort(tzcom_data);
+	}
+
+	PDEBUG("Before removing callback list");
+	mutex_lock(&tzcom_data->callback_list_lock);
+	list_for_each_entry_safe(lcb, ncb,
+			&tzcom_data->callback_list_head, list) {
+		list_del(&lcb->list);
+		kfree(lcb);
+	}
+	mutex_unlock(&tzcom_data->callback_list_lock);
+	PDEBUG("After removing callback list");
+
+	PDEBUG("Before removing svc list");
+	spin_lock_irqsave(&tzcom_data->registered_svc_list_lock, flags);
+	list_for_each_entry_safe(lsvc, nsvc,
+			&tzcom_data->registered_svc_list_head, list) {
+		list_del(&lsvc->list);
+		kfree(lsvc);
+	}
+	spin_unlock_irqrestore(&tzcom_data->registered_svc_list_lock, flags);
+	PDEBUG("After removing svc list");
+	if (pil != NULL) {
+		pil_put(pil);
+		pil = NULL;
+	}
+	PDEBUG("Freeing tzcom data");
+	kfree(tzcom_data);
+	tzcom_disable_bus_scaling();
+	return 0;
+}
+
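+/*
+ * Bus bandwidth vote table: use case 0 requests no bandwidth (idle),
+ * use case 1 the bandwidth voted while the device is in use.
+ */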
+static struct msm_bus_paths tzcom_bw_table[] = {
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+			},
+		},
+		.num_paths = 1,
+	},
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+				.ib = (492 * 8) * 1000000UL,
+				.ab = (492 * 8) *  100000UL,
+			},
+		},
+		.num_paths = 1,
+	},
+
+};
+
+static struct msm_bus_scale_pdata tzcom_bus_pdata = {
+	.usecase = tzcom_bw_table,
+	.num_usecases = ARRAY_SIZE(tzcom_bw_table),
+	.name = "tzcom",
+};
+static const struct file_operations tzcom_fops = {
+		.owner = THIS_MODULE,
+		.unlocked_ioctl = tzcom_ioctl,
+		.open = tzcom_open,
+		.release = tzcom_release
+};
+
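+/*
+ * Module init: create the tzcom character device, allocate the sb_in and
+ * sb_out shared buffers from kernel pmem and ioremap them, then create
+ * the ION client and register the bus-bandwidth client.
+ */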
+static int __init tzcom_init(void)
+{
+	int rc;
+	struct device *class_dev;
+
+	PDEBUG("Hello tzcom");
+
+	rc = alloc_chrdev_region(&tzcom_device_no, 0, 1, TZCOM_DEV);
+	if (rc < 0) {
+		PERR("alloc_chrdev_region failed %d", rc);
+		return rc;
+	}
+
+	driver_class = class_create(THIS_MODULE, TZCOM_DEV);
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		PERR("class_create failed %d", rc);
+		goto unregister_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, tzcom_device_no, NULL,
+			TZCOM_DEV);
+	if (IS_ERR(class_dev)) {
+		rc = -ENOMEM;
+		PERR("class_device_create failed %d", rc);
+		goto class_destroy;
+	}
+
+	cdev_init(&tzcom_cdev, &tzcom_fops);
+	tzcom_cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&tzcom_cdev, MKDEV(MAJOR(tzcom_device_no), 0), 1);
+	if (rc < 0) {
+		PERR("cdev_add failed %d", rc);
+		goto class_device_destroy;
+	}
+
+	sb_in_phys = pmem_kalloc(sb_in_length, PMEM_MEMTYPE_EBI1 |
+			PMEM_ALIGNMENT_4K);
+	if (IS_ERR((void *)sb_in_phys)) {
+		PERR("could not allocate in-kernel pmem buffers for sb_in");
+		sb_in_phys = 0;
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("physical_addr for sb_in: 0x%x", sb_in_phys);
+
+	sb_in_virt = (u8 *) ioremap((unsigned long)sb_in_phys,
+			sb_in_length);
+	if (!sb_in_virt) {
+		PERR("Shared buffer IN allocation failed.");
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("sb_in virt address: %p, phys address: 0x%x",
+			sb_in_virt, tzcom_virt_to_phys(sb_in_virt));
+
+	sb_out_phys = pmem_kalloc(sb_out_length, PMEM_MEMTYPE_EBI1 |
+			PMEM_ALIGNMENT_4K);
+	if (IS_ERR((void *)sb_out_phys)) {
+		PERR("could not allocate in-kernel pmem buffers for sb_out");
+		sb_out_phys = 0;
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("physical_addr for sb_out: 0x%x", sb_out_phys);
+
+	sb_out_virt = (u8 *) ioremap((unsigned long)sb_out_phys,
+			sb_out_length);
+	if (!sb_out_virt) {
+		PERR("Shared buffer OUT allocation failed.");
+		rc = -ENOMEM;
+		goto class_device_destroy;
+	}
+	PDEBUG("sb_out virt address: %p, phys address: 0x%x",
+			sb_out_virt, tzcom_virt_to_phys(sb_out_virt));
+	ion_clnt = msm_ion_client_create(0x03, "tzcom");
+	/* Initialized in tzcom_open */
+	pil = NULL;
+
+	tzcom_perf_client = msm_bus_scale_register_client(
+					&tzcom_bus_pdata);
+	if (!tzcom_perf_client)
+		pr_err("Unable to register bus client");
+
+	tzcom_bus_clk = clk_get(class_dev, "bus_clk");
+	if (IS_ERR(tzcom_bus_clk)) {
+		tzcom_bus_clk = NULL;
+	} else if (tzcom_bus_clk != NULL) {
+		pr_debug("Enabled DFAB clock\n");
+		clk_set_rate(tzcom_bus_clk, 64000000);
+	}
+	return 0;
+
+class_device_destroy:
+	if (sb_in_virt)
+		iounmap(sb_in_virt);
+	if (sb_in_phys)
+		pmem_kfree(sb_in_phys);
+	if (sb_out_virt)
+		iounmap(sb_out_virt);
+	if (sb_out_phys)
+		pmem_kfree(sb_out_phys);
+	device_destroy(driver_class, tzcom_device_no);
+class_destroy:
+	class_destroy(driver_class);
+unregister_chrdev_region:
+	unregister_chrdev_region(tzcom_device_no, 1);
+	return rc;
+}
+
+static void __exit tzcom_exit(void)
+{
+	PDEBUG("Goodbye tzcom");
+	if (sb_in_virt)
+		iounmap(sb_in_virt);
+	if (sb_in_phys)
+		pmem_kfree(sb_in_phys);
+	if (sb_out_virt)
+		iounmap(sb_out_virt);
+	if (sb_out_phys)
+		pmem_kfree(sb_out_phys);
+	if (pil != NULL) {
+		pil_put(pil);
+		pil = NULL;
+	}
+	clk_put(tzcom_bus_clk);
+	device_destroy(driver_class, tzcom_device_no);
+	class_destroy(driver_class);
+	unregister_chrdev_region(tzcom_device_no, 1);
+	ion_client_destroy(ion_clnt);
+}
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sachin Shah <sachins@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm TrustZone Communicator");
+MODULE_VERSION("1.00");
+
+module_init(tzcom_init);
+module_exit(tzcom_exit);
diff --git a/drivers/misc/tzcomi.h b/drivers/misc/tzcomi.h
new file mode 100644
index 0000000..33634cf
--- /dev/null
+++ b/drivers/misc/tzcomi.h
@@ -0,0 +1,112 @@
+/* Qualcomm TrustZone communicator driver
+ *
+ * Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __TZCOMI_H_
+#define __TZCOMI_H_
+
+#include <linux/types.h>
+
+enum tz_sched_cmd_id {
+	TZ_SCHED_CMD_ID_INVALID      = 0,
+	TZ_SCHED_CMD_ID_INIT_SB_OUT,    /**< Initialize the shared buffer */
+	TZ_SCHED_CMD_ID_INIT_SB_LOG,    /**< Initialize the logging shared buf */
+	TZ_SCHED_CMD_ID_UNKNOWN         = 0x7FFFFFFE,
+	TZ_SCHED_CMD_ID_MAX             = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_type {
+	TZ_SCHED_CMD_INVALID = 0,
+	TZ_SCHED_CMD_NEW,      /** New TZ Scheduler Command */
+	TZ_SCHED_CMD_PENDING,  /** Pending cmd...sched will restore stack */
+	TZ_SCHED_CMD_COMPLETE, /** TZ sched command is complete */
+	TZ_SCHED_CMD_MAX     = 0x7FFFFFFF
+};
+
+enum tz_sched_cmd_status {
+	TZ_SCHED_STATUS_INCOMPLETE = 0,
+	TZ_SCHED_STATUS_COMPLETE,
+	TZ_SCHED_STATUS_MAX  = 0x7FFFFFFF
+};
+
+/** Command structure for initializing shared buffers (SB_OUT
+    and SB_LOG)
+*/
+__packed struct tz_pr_init_sb_req_s {
+	/** First 4 bytes should always be command id
+	 * from enum tz_sched_cmd_id */
+	uint32_t                  pr_cmd;
+	/** Pointer to the physical location of sb_out buffer */
+	uint32_t                  sb_ptr;
+	/** length of shared buffer */
+	uint32_t                  sb_len;
+};
+
+
+__packed struct tz_pr_init_sb_rsp_s {
+	/** First 4 bytes should always be command id
+	 * from enum tz_sched_cmd_id */
+	uint32_t                  pr_cmd;
+	/** Return code: 0 on success, an appropriate error code otherwise */
+	int32_t                   ret;
+};
+
+
+/**
+ * struct tzcom_command - tzcom command buffer
+ * @cmd_type: value from enum tz_sched_cmd_type
+ * @sb_in_cmd_addr: points to physical location of command
+ *                buffer
+ * @sb_in_cmd_len: length of command buffer
+ */
+__packed struct tzcom_command {
+	uint32_t               cmd_type;
+	uint8_t                *sb_in_cmd_addr;
+	uint32_t               sb_in_cmd_len;
+};
+
+/**
+ * struct tzcom_response - tzcom response buffer
+ * @cmd_status: value from enum tz_sched_cmd_status
+ * @sb_in_rsp_addr: points to physical location of response
+ *                buffer
+ * @sb_in_rsp_len: length of command response
+ */
+__packed struct tzcom_response {
+	uint32_t                 cmd_status;
+	uint8_t                  *sb_in_rsp_addr;
+	uint32_t                 sb_in_rsp_len;
+};
+
+/**
+ * struct tzcom_callback - tzcom callback buffer
+ * @cmd_id: command to run in registered service
+ * @sb_out_cb_data_len: length of the callback data
+ * @sb_out_cb_data_off: offset of the callback data from the
+ *                start of this header
+ *
+ * A callback buffer would be laid out in sb_out as follows:
+ *
+ *     --------------------- <--- struct tzcom_callback
+ *     | callback header   |
+ *     --------------------- <--- tzcom_callback.sb_out_cb_data_off
+ *     | callback data     |
+ *     ---------------------
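+ *
+ * For illustration only (the pointer names are hypothetical), the data
+ * is located relative to the header:
+ *
+ *     u8 *cb_data = (u8 *)cb + cb->sb_out_cb_data_off;
+ *
+ * and is cb->sb_out_cb_data_len bytes long.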
+ */
+__packed struct tzcom_callback {
+	uint32_t cmd_id;
+	uint32_t sb_out_cb_data_len;
+	uint32_t sb_out_cb_data_off;
+};
+
+#endif /* __TZCOMI_H_ */