Merge tag 'char-misc-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver patches from Greg Kroah-Hartman:
 "Here's the big char/misc driver patches for 3.9-rc1.

  Nothing major here, just lots of different driver updates (mei,
  hyperv, ipack, extcon, vmci, etc.).

  All of these have been in the linux-next tree for a while."

* tag 'char-misc-3.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (209 commits)
  w1: w1_therm: Add force-pullup option for "broken" sensors
  w1: ds2482: Added 1-Wire pull-up support to the driver
  vme: add missing put_device() after device_register() fails
  extcon: max8997: Use workqueue to check cable state after completing boot of platform
  extcon: max8997: Set default UART/USB path on probe
  extcon: max8997: Consolidate duplicate code for checking ADC/CHG cable type
  extcon: max8997: Set default of ADC debounce time during initialization
  extcon: max8997: Remove duplicate code related to set H/W line path
  extcon: max8997: Move defined constant to header file
  extcon: max77693: Make max77693_extcon_cable static
  extcon: max8997: Remove unreachable code
  extcon: max8997: Make max8997_extcon_cable static
  extcon: max77693: Remove unnecessary goto statement to improve readability
  extcon: max77693: Convert to devm_input_allocate_device()
  extcon: gpio: Rename filename of extcon-gpio.c according to kernel naming style
  CREDITS: update email and address of Harald Hoyer
  extcon: arizona: Use MICDET for final microphone identification
  extcon: arizona: Always take the first HPDET reading as the final one
  extcon: arizona: Clear _trig_sts bits after jack detection
  extcon: arizona: Don't HPDET magic when headphones are enabled
  ...
diff --git a/CREDITS b/CREDITS
index 2346b09..948e0fb 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1572,12 +1572,12 @@
 S: USA
 
 N: Harald Hoyer
-E: harald.hoyer@parzelle.de
-W: http://parzelle.de/
+E: harald@redhat.com
+W: http://www.harald-hoyer.de
 D: ip_masq_quake
 D: md boot support
-S: Hohe Strasse 30
-S: D-70176 Stuttgart
+S: Am Strand 5
+S: D-19063 Schwerin
 S: Germany
 
 N: Jan Hubicka
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index ddb05e9..9561815 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -984,7 +984,7 @@
 		return errno;
 	}
 	configfd = open("/sys/class/uio/uio0/device/config", O_RDWR);
-	if (uiofd < 0) {
+	if (configfd < 0) {
 		perror("config open:");
 		return errno;
 	}
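The fix matters because the howto example opens two separate descriptors and the
old test re-checked uiofd after the second open. A minimal sketch of the corrected
sequence (the helper name is illustrative; paths and open flags follow the
documentation example, error handling only):

	#include <fcntl.h>
	#include <stdio.h>
	#include <errno.h>

	static int open_uio_fds(int *uiofd, int *configfd)
	{
		*uiofd = open("/dev/uio0", O_RDONLY);
		if (*uiofd < 0) {
			perror("uio open:");
			return errno;
		}
		*configfd = open("/sys/class/uio/uio0/device/config", O_RDWR);
		if (*configfd < 0) {	/* the old test checked uiofd here by mistake */
			perror("config open:");
			return errno;
		}
		return 0;
	}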
diff --git a/Documentation/w1/slaves/w1_therm b/Documentation/w1/slaves/w1_therm
index 874a8ca..cc62a95 100644
--- a/Documentation/w1/slaves/w1_therm
+++ b/Documentation/w1/slaves/w1_therm
@@ -34,9 +34,16 @@
 precision (which would also reduce the conversion time).
 
 The module parameter strong_pullup can be set to 0 to disable the
-strong pullup or 1 to enable.  If enabled the 5V strong pullup will be
-enabled when the conversion is taking place provided the master driver
-must support the strong pullup (or it falls back to a pullup
+strong pullup, 1 to enable autodetection or 2 to force strong pullup.
+In case of autodetection, the driver will use the "READ POWER SUPPLY"
+command to check if there are parasite powered devices on the bus.
+If so, it will activate the master's strong pullup.
+In case the detection of parasite devices using this command fails
+(seems to be the case with some DS18S20), the strong pullup can
+be force-enabled.
+If the strong pullup is enabled, the master's strong pullup will be
+driven when the conversion is taking place, provided the master driver
+does support the strong pullup (or it falls back to a pullup
 resistor).  The DS18b20 temperature sensor specification lists a
 maximum current draw of 1.5mA and that a 5k pullup resistor is not
 sufficient.  The strong pullup is designed to provide the additional
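For reference, w1_therm exposes readings through the w1_slave sysfs file, so a
userspace reader is plain file I/O. A minimal sketch (the 28-xxxx device ID below
is a placeholder for a real sensor, and it assumes the module was loaded with a
setting such as strong_pullup=2):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/bus/w1/devices/28-000005e2fdc3/w1_slave", "r");

		if (!f) {
			perror("w1_slave");
			return 1;
		}
		/* two lines: CRC status, then "... t=<temperature in millidegrees C>" */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}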
diff --git a/MAINTAINERS b/MAINTAINERS
index eac5eda..f5b9851 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5405,6 +5405,13 @@
 F:	Documentation/scsi/NinjaSCSI.txt
 F:	drivers/scsi/nsp32*
 
+NTB DRIVER
+M:	Jon Mason <jon.mason@intel.com>
+S:	Supported
+F:	drivers/ntb/
+F:	drivers/net/ntb_netdev.c
+F:	include/linux/ntb.h
+
 NTFS FILESYSTEM
 M:	Anton Altaparmakov <anton@tuxera.com>
 L:	linux-ntfs-dev@lists.sourceforge.net
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 2b4e89b..202fa6d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -152,6 +152,8 @@
 
 source "drivers/iio/Kconfig"
 
+source "drivers/ntb/Kconfig"
+
 source "drivers/vme/Kconfig"
 
 source "drivers/pwm/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index a8d32f1..b359948 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -147,3 +147,4 @@
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_VME_BUS)		+= vme/
 obj-$(CONFIG_IPACK_BUS)		+= ipack/
+obj-$(CONFIG_NTB)		+= ntb/
diff --git a/drivers/char/hw_random/exynos-rng.c b/drivers/char/hw_random/exynos-rng.c
index 4673fc4..ac47631 100644
--- a/drivers/char/hw_random/exynos-rng.c
+++ b/drivers/char/hw_random/exynos-rng.c
@@ -163,7 +163,7 @@
 }
 
 
-UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
+static UNIVERSAL_DEV_PM_OPS(exynos_rng_pm_ops, exynos_rng_runtime_suspend,
 					exynos_rng_runtime_resume, NULL);
 
 static struct platform_driver exynos_rng_driver = {
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index c6fa3bc..6f6e92a 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -399,7 +399,7 @@
 {
 	unsigned long p = *ppos;
 	ssize_t low_count, read, sz;
-	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
+	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
 	int err = 0;
 
 	read = 0;
@@ -527,7 +527,7 @@
 	unsigned long p = *ppos;
 	ssize_t wrote = 0;
 	ssize_t virtr = 0;
-	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
 	int err = 0;
 
 	if (p < (unsigned long) high_memory) {
@@ -595,7 +595,7 @@
 			  size_t count, loff_t *ppos)
 {
 	unsigned long i = *ppos;
-	const char __user * tmp = buf;
+	const char __user *tmp = buf;
 
 	if (!access_ok(VERIFY_READ, buf, count))
 		return -EFAULT;
@@ -729,7 +729,7 @@
 	return ret;
 }
 
-static int open_port(struct inode * inode, struct file * filp)
+static int open_port(struct inode *inode, struct file *filp)
 {
 	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
 }
@@ -898,7 +898,7 @@
 			continue;
 
 		/*
-		 * Create /dev/port? 
+		 * Create /dev/port?
 		 */
 		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
 			continue;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index d0c9852..5c5cc00 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -102,8 +102,7 @@
 	ASYNC_PARITY_NONE		/* unsigned char parity; */
 };
 
-typedef struct
-{
+typedef struct {
 	int count;
 	unsigned char status;
 	char data[1];
@@ -326,10 +325,10 @@
 #define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg))
 
 #define set_reg_bits(info, reg, mask) \
-    write_reg(info, (reg), \
+	write_reg(info, (reg), \
 		 (unsigned char) (read_reg(info, (reg)) | (mask)))
 #define clear_reg_bits(info, reg, mask) \
-    write_reg(info, (reg), \
+	write_reg(info, (reg), \
 		 (unsigned char) (read_reg(info, (reg)) & ~(mask)))
 /*
  * interrupt enable/disable routines
@@ -356,10 +355,10 @@
 }
 
 #define port_irq_disable(info, mask) \
-  { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
+	{ info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); }
 
 #define port_irq_enable(info, mask) \
-  { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
+	{ info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); }
 
 static void rx_start(MGSLPC_INFO *info);
 static void rx_stop(MGSLPC_INFO *info);
@@ -397,7 +396,7 @@
 
 static int claim_resources(MGSLPC_INFO *info);
 static void release_resources(MGSLPC_INFO *info);
-static void mgslpc_add_device(MGSLPC_INFO *info);
+static int mgslpc_add_device(MGSLPC_INFO *info);
 static void mgslpc_remove_device(MGSLPC_INFO *info);
 
 static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty);
@@ -514,49 +513,56 @@
 
 static int mgslpc_probe(struct pcmcia_device *link)
 {
-    MGSLPC_INFO *info;
-    int ret;
+	MGSLPC_INFO *info;
+	int ret;
 
-    if (debug_level >= DEBUG_LEVEL_INFO)
-	    printk("mgslpc_attach\n");
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("mgslpc_attach\n");
 
-    info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
-    if (!info) {
-	    printk("Error can't allocate device instance data\n");
-	    return -ENOMEM;
-    }
+	info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL);
+	if (!info) {
+		printk("Error can't allocate device instance data\n");
+		return -ENOMEM;
+	}
 
-    info->magic = MGSLPC_MAGIC;
-    tty_port_init(&info->port);
-    info->port.ops = &mgslpc_port_ops;
-    INIT_WORK(&info->task, bh_handler);
-    info->max_frame_size = 4096;
-    info->port.close_delay = 5*HZ/10;
-    info->port.closing_wait = 30*HZ;
-    init_waitqueue_head(&info->status_event_wait_q);
-    init_waitqueue_head(&info->event_wait_q);
-    spin_lock_init(&info->lock);
-    spin_lock_init(&info->netlock);
-    memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
-    info->idle_mode = HDLC_TXIDLE_FLAGS;
-    info->imra_value = 0xffff;
-    info->imrb_value = 0xffff;
-    info->pim_value = 0xff;
+	info->magic = MGSLPC_MAGIC;
+	tty_port_init(&info->port);
+	info->port.ops = &mgslpc_port_ops;
+	INIT_WORK(&info->task, bh_handler);
+	info->max_frame_size = 4096;
+	info->port.close_delay = 5*HZ/10;
+	info->port.closing_wait = 30*HZ;
+	init_waitqueue_head(&info->status_event_wait_q);
+	init_waitqueue_head(&info->event_wait_q);
+	spin_lock_init(&info->lock);
+	spin_lock_init(&info->netlock);
+	memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
+	info->idle_mode = HDLC_TXIDLE_FLAGS;
+	info->imra_value = 0xffff;
+	info->imrb_value = 0xffff;
+	info->pim_value = 0xff;
 
-    info->p_dev = link;
-    link->priv = info;
+	info->p_dev = link;
+	link->priv = info;
 
-    /* Initialize the struct pcmcia_device structure */
+	/* Initialize the struct pcmcia_device structure */
 
-    ret = mgslpc_config(link);
-    if (ret) {
-	    tty_port_destroy(&info->port);
-	    return ret;
-    }
+	ret = mgslpc_config(link);
+	if (ret != 0)
+		goto failed;
 
-    mgslpc_add_device(info);
+	ret = mgslpc_add_device(info);
+	if (ret != 0)
+		goto failed_release;
 
-    return 0;
+	return 0;
+
+failed_release:
+	mgslpc_release((u_long)link);
+failed:
+	tty_port_destroy(&info->port);
+	kfree(info);
+	return ret;
 }
 
 /* Card has been inserted.
@@ -569,35 +575,35 @@
 
 static int mgslpc_config(struct pcmcia_device *link)
 {
-    MGSLPC_INFO *info = link->priv;
-    int ret;
+	MGSLPC_INFO *info = link->priv;
+	int ret;
 
-    if (debug_level >= DEBUG_LEVEL_INFO)
-	    printk("mgslpc_config(0x%p)\n", link);
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("mgslpc_config(0x%p)\n", link);
 
-    link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
+	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
 
-    ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
-    if (ret != 0)
-	    goto failed;
+	ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL);
+	if (ret != 0)
+		goto failed;
 
-    link->config_index = 8;
-    link->config_regs = PRESENT_OPTION;
+	link->config_index = 8;
+	link->config_regs = PRESENT_OPTION;
 
-    ret = pcmcia_request_irq(link, mgslpc_isr);
-    if (ret)
-	    goto failed;
-    ret = pcmcia_enable_device(link);
-    if (ret)
-	    goto failed;
+	ret = pcmcia_request_irq(link, mgslpc_isr);
+	if (ret)
+		goto failed;
+	ret = pcmcia_enable_device(link);
+	if (ret)
+		goto failed;
 
-    info->io_base = link->resource[0]->start;
-    info->irq_level = link->irq;
-    return 0;
+	info->io_base = link->resource[0]->start;
+	info->irq_level = link->irq;
+	return 0;
 
 failed:
-    mgslpc_release((u_long)link);
-    return -ENODEV;
+	mgslpc_release((u_long)link);
+	return -ENODEV;
 }
 
 /* Card has been removed.
@@ -703,12 +709,12 @@
 	if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
 		return;
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("tx_pause(%s)\n",info->device_name);
+		printk("tx_pause(%s)\n", info->device_name);
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (info->tx_enabled)
-	 	tx_stop(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+		tx_stop(info);
+	spin_unlock_irqrestore(&info->lock, flags);
 }
 
 static void tx_release(struct tty_struct *tty)
@@ -719,12 +725,12 @@
 	if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
 		return;
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("tx_release(%s)\n",info->device_name);
+		printk("tx_release(%s)\n", info->device_name);
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (!info->tx_enabled)
-	 	tx_start(info, tty);
-	spin_unlock_irqrestore(&info->lock,flags);
+		tx_start(info, tty);
+	spin_unlock_irqrestore(&info->lock, flags);
 }
 
 /* Return next bottom half action to perform.
@@ -735,7 +741,7 @@
 	unsigned long flags;
 	int rc = 0;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 
 	if (info->pending_bh & BH_RECEIVE) {
 		info->pending_bh &= ~BH_RECEIVE;
@@ -754,7 +760,7 @@
 		info->bh_requested = false;
 	}
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	return rc;
 }
@@ -766,7 +772,7 @@
 	int action;
 
 	if (debug_level >= DEBUG_LEVEL_BH)
-		printk( "%s(%d):bh_handler(%s) entry\n",
+		printk("%s(%d):bh_handler(%s) entry\n",
 			__FILE__,__LINE__,info->device_name);
 
 	info->bh_running = true;
@@ -775,8 +781,8 @@
 	while((action = bh_action(info)) != 0) {
 
 		/* Process work item */
-		if ( debug_level >= DEBUG_LEVEL_BH )
-			printk( "%s(%d):bh_handler() work item action=%d\n",
+		if (debug_level >= DEBUG_LEVEL_BH)
+			printk("%s(%d):bh_handler() work item action=%d\n",
 				__FILE__,__LINE__,action);
 
 		switch (action) {
@@ -799,7 +805,7 @@
 
 	tty_kref_put(tty);
 	if (debug_level >= DEBUG_LEVEL_BH)
-		printk( "%s(%d):bh_handler(%s) exit\n",
+		printk("%s(%d):bh_handler(%s) exit\n",
 			__FILE__,__LINE__,info->device_name);
 }
 
@@ -828,7 +834,7 @@
 	RXBUF *buf = (RXBUF*)(info->rx_buf + (info->rx_put * info->rx_buf_size));
 
 	if (debug_level >= DEBUG_LEVEL_ISR)
-		printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom);
+		printk("%s(%d):rx_ready_hdlc(eom=%d)\n", __FILE__, __LINE__, eom);
 
 	if (!info->rx_enabled)
 		return;
@@ -844,7 +850,8 @@
 
 	if (eom) {
 		/* end of frame, get FIFO count from RBCL register */
-		if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f)))
+		fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f);
+		if (fifo_count == 0)
 			fifo_count = 32;
 	} else
 		fifo_count = 32;
@@ -889,7 +896,7 @@
 	unsigned char data, status, flag;
 	int fifo_count;
 	int work = 0;
- 	struct mgsl_icount *icount = &info->icount;
+	struct mgsl_icount *icount = &info->icount;
 
 	if (tcd) {
 		/* early termination, get FIFO count from RBCL register */
@@ -994,7 +1001,7 @@
 	int c;
 
 	if (debug_level >= DEBUG_LEVEL_ISR)
-		printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name);
+		printk("%s(%d):tx_ready(%s)\n", __FILE__, __LINE__, info->device_name);
 
 	if (info->params.mode == MGSL_MODE_HDLC) {
 		if (!info->tx_active)
@@ -1239,7 +1246,7 @@
 	 */
 
 	if (info->pending_bh && !info->bh_running && !info->bh_requested) {
-		if ( debug_level >= DEBUG_LEVEL_ISR )
+		if (debug_level >= DEBUG_LEVEL_ISR)
 			printk("%s(%d):%s queueing bh task.\n",
 				__FILE__,__LINE__,info->device_name);
 		schedule_work(&info->task);
@@ -1263,7 +1270,7 @@
 	int retval = 0;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name);
+		printk("%s(%d):startup(%s)\n", __FILE__, __LINE__, info->device_name);
 
 	if (info->port.flags & ASYNC_INITIALIZED)
 		return 0;
@@ -1273,7 +1280,7 @@
 		info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL);
 		if (!info->tx_buf) {
 			printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n",
-				__FILE__,__LINE__,info->device_name);
+				__FILE__, __LINE__, info->device_name);
 			return -ENOMEM;
 		}
 	}
@@ -1288,15 +1295,15 @@
 	retval = claim_resources(info);
 
 	/* perform existence check and diagnostics */
-	if ( !retval )
+	if (!retval)
 		retval = adapter_test(info);
 
-	if ( retval ) {
-  		if (capable(CAP_SYS_ADMIN) && tty)
+	if (retval) {
+		if (capable(CAP_SYS_ADMIN) && tty)
 			set_bit(TTY_IO_ERROR, &tty->flags);
 		release_resources(info);
-  		return retval;
-  	}
+		return retval;
+	}
 
 	/* program hardware for current parameters */
 	mgslpc_change_params(info, tty);
@@ -1320,7 +1327,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_shutdown(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	/* clear status wait queue because status changes */
 	/* can't happen after shutting down the hardware */
@@ -1334,7 +1341,7 @@
 		info->tx_buf = NULL;
 	}
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 
 	rx_stop(info);
 	tx_stop(info);
@@ -1342,12 +1349,12 @@
 	/* TODO:disable interrupts instead of reset to preserve signal states */
 	reset_device(info);
 
- 	if (!tty || tty->termios.c_cflag & HUPCL) {
+	if (!tty || tty->termios.c_cflag & HUPCL) {
 		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
 		set_signals(info);
 	}
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	release_resources(info);
 
@@ -1361,7 +1368,7 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 
 	rx_stop(info);
 	tx_stop(info);
@@ -1386,7 +1393,7 @@
 	if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
 		rx_start(info);
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 }
 
 /* Reconfigure adapter based on new parameters
@@ -1401,13 +1408,13 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_change_params(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	cflag = tty->termios.c_cflag;
 
 	/* if B0 rate (hangup) specified then negate RTS and DTR */
 	/* otherwise assert RTS and DTR */
- 	if (cflag & CBAUD)
+	if (cflag & CBAUD)
 		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
 	else
 		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
@@ -1453,7 +1460,7 @@
 		info->params.data_rate = tty_get_baud_rate(tty);
 	}
 
-	if ( info->params.data_rate ) {
+	if (info->params.data_rate) {
 		info->timeout = (32*HZ*bits_per_char) /
 				info->params.data_rate;
 	}
@@ -1488,8 +1495,8 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO) {
-		printk( "%s(%d):mgslpc_put_char(%d) on %s\n",
-			__FILE__,__LINE__,ch,info->device_name);
+		printk("%s(%d):mgslpc_put_char(%d) on %s\n",
+			__FILE__, __LINE__, ch, info->device_name);
 	}
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
@@ -1498,7 +1505,7 @@
 	if (!info->tx_buf)
 		return 0;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 
 	if (info->params.mode == MGSL_MODE_ASYNC || !info->tx_active) {
 		if (info->tx_count < TXBUFSIZE - 1) {
@@ -1508,7 +1515,7 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return 1;
 }
 
@@ -1521,8 +1528,8 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
-			__FILE__,__LINE__,info->device_name,info->tx_count);
+		printk("%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n",
+			__FILE__, __LINE__, info->device_name, info->tx_count);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars"))
 		return;
@@ -1532,13 +1539,13 @@
 		return;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
-			__FILE__,__LINE__,info->device_name);
+		printk("%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n",
+			__FILE__, __LINE__, info->device_name);
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (!info->tx_active)
-	 	tx_start(info, tty);
-	spin_unlock_irqrestore(&info->lock,flags);
+		tx_start(info, tty);
+	spin_unlock_irqrestore(&info->lock, flags);
 }
 
 /* Send a block of data
@@ -1559,8 +1566,8 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk( "%s(%d):mgslpc_write(%s) count=%d\n",
-			__FILE__,__LINE__,info->device_name,count);
+		printk("%s(%d):mgslpc_write(%s) count=%d\n",
+			__FILE__, __LINE__, info->device_name, count);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") ||
 		!info->tx_buf)
@@ -1586,26 +1593,26 @@
 
 		memcpy(info->tx_buf + info->tx_put, buf, c);
 
-		spin_lock_irqsave(&info->lock,flags);
+		spin_lock_irqsave(&info->lock, flags);
 		info->tx_put = (info->tx_put + c) & (TXBUFSIZE-1);
 		info->tx_count += c;
-		spin_unlock_irqrestore(&info->lock,flags);
+		spin_unlock_irqrestore(&info->lock, flags);
 
 		buf += c;
 		count -= c;
 		ret += c;
 	}
 start:
- 	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
-		spin_lock_irqsave(&info->lock,flags);
+	if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
+		spin_lock_irqsave(&info->lock, flags);
 		if (!info->tx_active)
-		 	tx_start(info, tty);
-		spin_unlock_irqrestore(&info->lock,flags);
- 	}
+			tx_start(info, tty);
+		spin_unlock_irqrestore(&info->lock, flags);
+	}
 cleanup:
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk( "%s(%d):mgslpc_write(%s) returning=%d\n",
-			__FILE__,__LINE__,info->device_name,ret);
+		printk("%s(%d):mgslpc_write(%s) returning=%d\n",
+			__FILE__, __LINE__, info->device_name, ret);
 	return ret;
 }
 
@@ -1633,7 +1640,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_write_room(%s)=%d\n",
-			 __FILE__,__LINE__, info->device_name, ret);
+			 __FILE__, __LINE__, info->device_name, ret);
 	return ret;
 }
 
@@ -1646,7 +1653,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_chars_in_buffer(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer"))
 		return 0;
@@ -1658,7 +1665,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
-			 __FILE__,__LINE__, info->device_name, rc);
+			 __FILE__, __LINE__, info->device_name, rc);
 
 	return rc;
 }
@@ -1672,15 +1679,15 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
 		return;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	info->tx_count = info->tx_put = info->tx_get = 0;
 	del_timer(&info->tx_timer);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	wake_up_interruptible(&tty->write_wait);
 	tty_wakeup(tty);
@@ -1695,17 +1702,17 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
-			 __FILE__,__LINE__, info->device_name, ch );
+			 __FILE__, __LINE__, info->device_name, ch);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
 		return;
 
 	info->x_char = ch;
 	if (ch) {
-		spin_lock_irqsave(&info->lock,flags);
+		spin_lock_irqsave(&info->lock, flags);
 		if (!info->tx_enabled)
-		 	tx_start(info, tty);
-		spin_unlock_irqrestore(&info->lock,flags);
+			tx_start(info, tty);
+		spin_unlock_irqrestore(&info->lock, flags);
 	}
 }
 
@@ -1718,7 +1725,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_throttle(%s) entry\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
 		return;
@@ -1726,11 +1733,11 @@
 	if (I_IXOFF(tty))
 		mgslpc_send_xchar(tty, STOP_CHAR(tty));
 
- 	if (tty->termios.c_cflag & CRTSCTS) {
-		spin_lock_irqsave(&info->lock,flags);
+	if (tty->termios.c_cflag & CRTSCTS) {
+		spin_lock_irqsave(&info->lock, flags);
 		info->serial_signals &= ~SerialSignal_RTS;
-	 	set_signals(info);
-		spin_unlock_irqrestore(&info->lock,flags);
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
 	}
 }
 
@@ -1743,7 +1750,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_unthrottle(%s) entry\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle"))
 		return;
@@ -1755,11 +1762,11 @@
 			mgslpc_send_xchar(tty, START_CHAR(tty));
 	}
 
- 	if (tty->termios.c_cflag & CRTSCTS) {
-		spin_lock_irqsave(&info->lock,flags);
+	if (tty->termios.c_cflag & CRTSCTS) {
+		spin_lock_irqsave(&info->lock, flags);
 		info->serial_signals |= SerialSignal_RTS;
-	 	set_signals(info);
-		spin_unlock_irqrestore(&info->lock,flags);
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
 	}
 }
 
@@ -1797,33 +1804,33 @@
  *
  * Arguments:
  *
- * 	info		pointer to device instance data
- * 	new_params	user buffer containing new serial params
+ *	info		pointer to device instance data
+ *	new_params	user buffer containing new serial params
  *
  * Returns:	0 if success, otherwise error code
  */
 static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty)
 {
- 	unsigned long flags;
+	unsigned long flags;
 	MGSL_PARAMS tmp_params;
 	int err;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):set_params %s\n", __FILE__,__LINE__,
-			info->device_name );
+			info->device_name);
 	COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS));
 	if (err) {
-		if ( debug_level >= DEBUG_LEVEL_INFO )
-			printk( "%s(%d):set_params(%s) user buffer copy failed\n",
-				__FILE__,__LINE__,info->device_name);
+		if (debug_level >= DEBUG_LEVEL_INFO)
+			printk("%s(%d):set_params(%s) user buffer copy failed\n",
+				__FILE__, __LINE__, info->device_name);
 		return -EFAULT;
 	}
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS));
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
- 	mgslpc_change_params(info, tty);
+	mgslpc_change_params(info, tty);
 
 	return 0;
 }
@@ -1841,13 +1848,13 @@
 
 static int set_txidle(MGSLPC_INFO * info, int idle_mode)
 {
- 	unsigned long flags;
+	unsigned long flags;
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("set_txidle(%s,%d)\n", info->device_name, idle_mode);
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	info->idle_mode = idle_mode;
 	tx_set_idle(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return 0;
 }
 
@@ -1864,11 +1871,11 @@
 
 static int set_interface(MGSLPC_INFO * info, int if_mode)
 {
- 	unsigned long flags;
+	unsigned long flags;
 	unsigned char val;
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("set_interface(%s,%d)\n", info->device_name, if_mode);
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	info->if_mode = if_mode;
 
 	val = read_reg(info, PVR) & 0x0f;
@@ -1880,18 +1887,18 @@
 	}
 	write_reg(info, PVR, val);
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return 0;
 }
 
 static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty)
 {
- 	unsigned long flags;
+	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("set_txenable(%s,%d)\n", info->device_name, enable);
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (enable) {
 		if (!info->tx_enabled)
 			tx_start(info, tty);
@@ -1899,18 +1906,18 @@
 		if (info->tx_enabled)
 			tx_stop(info);
 	}
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return 0;
 }
 
 static int tx_abort(MGSLPC_INFO * info)
 {
- 	unsigned long flags;
+	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("tx_abort(%s)\n", info->device_name);
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (info->tx_active && info->tx_count &&
 	    info->params.mode == MGSL_MODE_HDLC) {
 		/* clear data count so FIFO is not filled on next IRQ.
@@ -1919,18 +1926,18 @@
 		info->tx_count = info->tx_put = info->tx_get = 0;
 		info->tx_aborting = true;
 	}
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return 0;
 }
 
 static int set_rxenable(MGSLPC_INFO * info, int enable)
 {
- 	unsigned long flags;
+	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("set_rxenable(%s,%d)\n", info->device_name, enable);
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (enable) {
 		if (!info->rx_enabled)
 			rx_start(info);
@@ -1938,21 +1945,21 @@
 		if (info->rx_enabled)
 			rx_stop(info);
 	}
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return 0;
 }
 
 /* wait for specified event to occur
  *
- * Arguments:	 	info	pointer to device instance data
- * 			mask	pointer to bitmask of events to wait for
- * Return Value:	0 	if successful and bit mask updated with
+ * Arguments:		info	pointer to device instance data
+ *			mask	pointer to bitmask of events to wait for
+ * Return Value:	0	if successful and bit mask updated with
  *				of events triggerred,
- * 			otherwise error code
+ *			otherwise error code
  */
 static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr)
 {
- 	unsigned long flags;
+	unsigned long flags;
 	int s;
 	int rc=0;
 	struct mgsl_icount cprev, cnow;
@@ -1968,18 +1975,18 @@
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("wait_events(%s,%d)\n", info->device_name, mask);
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 
 	/* return immediately if state matches requested events */
 	get_signals(info);
 	s = info->serial_signals;
 	events = mask &
 		( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) +
- 		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
+		  ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) +
 		  ((s & SerialSignal_CTS) ? MgslEvent_CtsActive:MgslEvent_CtsInactive) +
 		  ((s & SerialSignal_RI)  ? MgslEvent_RiActive :MgslEvent_RiInactive) );
 	if (events) {
-		spin_unlock_irqrestore(&info->lock,flags);
+		spin_unlock_irqrestore(&info->lock, flags);
 		goto exit;
 	}
 
@@ -1994,7 +2001,7 @@
 	set_current_state(TASK_INTERRUPTIBLE);
 	add_wait_queue(&info->event_wait_q, &wait);
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 
 	for(;;) {
@@ -2005,11 +2012,11 @@
 		}
 
 		/* get current irq counts */
-		spin_lock_irqsave(&info->lock,flags);
+		spin_lock_irqsave(&info->lock, flags);
 		cnow = info->icount;
 		newsigs = info->input_signal_events;
 		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_irqrestore(&info->lock,flags);
+		spin_unlock_irqrestore(&info->lock, flags);
 
 		/* if no change, wait aborted for some reason */
 		if (newsigs.dsr_up   == oldsigs.dsr_up   &&
@@ -2048,10 +2055,10 @@
 	set_current_state(TASK_RUNNING);
 
 	if (mask & MgslEvent_ExitHuntMode) {
-		spin_lock_irqsave(&info->lock,flags);
+		spin_lock_irqsave(&info->lock, flags);
 		if (!waitqueue_active(&info->event_wait_q))
 			irq_disable(info, CHA, IRQ_EXITHUNT);
-		spin_unlock_irqrestore(&info->lock,flags);
+		spin_unlock_irqrestore(&info->lock, flags);
 	}
 exit:
 	if (rc == 0)
@@ -2061,17 +2068,17 @@
 
 static int modem_input_wait(MGSLPC_INFO *info,int arg)
 {
- 	unsigned long flags;
+	unsigned long flags;
 	int rc;
 	struct mgsl_icount cprev, cnow;
 	DECLARE_WAITQUEUE(wait, current);
 
 	/* save current irq counts */
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	cprev = info->icount;
 	add_wait_queue(&info->status_event_wait_q, &wait);
 	set_current_state(TASK_INTERRUPTIBLE);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	for(;;) {
 		schedule();
@@ -2081,10 +2088,10 @@
 		}
 
 		/* get new irq counts */
-		spin_lock_irqsave(&info->lock,flags);
+		spin_lock_irqsave(&info->lock, flags);
 		cnow = info->icount;
 		set_current_state(TASK_INTERRUPTIBLE);
-		spin_unlock_irqrestore(&info->lock,flags);
+		spin_unlock_irqrestore(&info->lock, flags);
 
 		/* if no change, wait aborted for some reason */
 		if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
@@ -2115,11 +2122,11 @@
 {
 	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
 	unsigned int result;
- 	unsigned long flags;
+	unsigned long flags;
 
-	spin_lock_irqsave(&info->lock,flags);
- 	get_signals(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	result = ((info->serial_signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
 		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
@@ -2130,7 +2137,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):%s tiocmget() value=%08X\n",
-			 __FILE__,__LINE__, info->device_name, result );
+			 __FILE__, __LINE__, info->device_name, result);
 	return result;
 }
 
@@ -2140,11 +2147,11 @@
 		    unsigned int set, unsigned int clear)
 {
 	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
- 	unsigned long flags;
+	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):%s tiocmset(%x,%x)\n",
-			__FILE__,__LINE__,info->device_name, set, clear);
+			__FILE__, __LINE__, info->device_name, set, clear);
 
 	if (set & TIOCM_RTS)
 		info->serial_signals |= SerialSignal_RTS;
@@ -2155,9 +2162,9 @@
 	if (clear & TIOCM_DTR)
 		info->serial_signals &= ~SerialSignal_DTR;
 
-	spin_lock_irqsave(&info->lock,flags);
- 	set_signals(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
+	set_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	return 0;
 }
@@ -2174,17 +2181,17 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_break(%s,%d)\n",
-			 __FILE__,__LINE__, info->device_name, break_state);
+			 __FILE__, __LINE__, info->device_name, break_state);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
 		return -EINVAL;
 
-	spin_lock_irqsave(&info->lock,flags);
- 	if (break_state == -1)
+	spin_lock_irqsave(&info->lock, flags);
+	if (break_state == -1)
 		set_reg_bits(info, CHA+DAFO, BIT6);
 	else
 		clear_reg_bits(info, CHA+DAFO, BIT6);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return 0;
 }
 
@@ -2195,9 +2202,9 @@
 	struct mgsl_icount cnow;	/* kernel counter temps */
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	cnow = info->icount;
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	icount->cts = cnow.cts;
 	icount->dsr = cnow.dsr;
@@ -2218,9 +2225,9 @@
  *
  * Arguments:
  *
- * 	tty	pointer to tty instance data
- * 	cmd	IOCTL command code
- * 	arg	command argument/context
+ *	tty	pointer to tty instance data
+ *	cmd	IOCTL command code
+ *	arg	command argument/context
  *
  * Return Value:	0 if success, otherwise error code
  */
@@ -2231,8 +2238,8 @@
 	void __user *argp = (void __user *)arg;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
-			info->device_name, cmd );
+		printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__, __LINE__,
+			info->device_name, cmd);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl"))
 		return -ENODEV;
@@ -2278,8 +2285,8 @@
  *
  * Arguments:
  *
- * 	tty		pointer to tty structure
- * 	termios		pointer to buffer to hold returned old termios
+ *	tty		pointer to tty structure
+ *	termios		pointer to buffer to hold returned old termios
  */
 static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
@@ -2287,8 +2294,8 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__,
-			tty->driver->name );
+		printk("%s(%d):mgslpc_set_termios %s\n", __FILE__, __LINE__,
+			tty->driver->name);
 
 	/* just return if nothing has changed */
 	if ((tty->termios.c_cflag == old_termios->c_cflag)
@@ -2302,22 +2309,22 @@
 	if (old_termios->c_cflag & CBAUD &&
 	    !(tty->termios.c_cflag & CBAUD)) {
 		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
-		spin_lock_irqsave(&info->lock,flags);
-	 	set_signals(info);
-		spin_unlock_irqrestore(&info->lock,flags);
+		spin_lock_irqsave(&info->lock, flags);
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
 	}
 
 	/* Handle transition away from B0 status */
 	if (!(old_termios->c_cflag & CBAUD) &&
 	    tty->termios.c_cflag & CBAUD) {
 		info->serial_signals |= SerialSignal_DTR;
- 		if (!(tty->termios.c_cflag & CRTSCTS) ||
- 		    !test_bit(TTY_THROTTLED, &tty->flags)) {
+		if (!(tty->termios.c_cflag & CRTSCTS) ||
+		    !test_bit(TTY_THROTTLED, &tty->flags)) {
 			info->serial_signals |= SerialSignal_RTS;
- 		}
-		spin_lock_irqsave(&info->lock,flags);
-	 	set_signals(info);
-		spin_unlock_irqrestore(&info->lock,flags);
+		}
+		spin_lock_irqsave(&info->lock, flags);
+		set_signals(info);
+		spin_unlock_irqrestore(&info->lock, flags);
 	}
 
 	/* Handle turning off CRTSCTS */
@@ -2338,15 +2345,15 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
-			 __FILE__,__LINE__, info->device_name, port->count);
+			 __FILE__, __LINE__, info->device_name, port->count);
 
 	WARN_ON(!port->count);
 
 	if (tty_port_close_start(port, tty, filp) == 0)
 		goto cleanup;
 
- 	if (port->flags & ASYNC_INITIALIZED)
- 		mgslpc_wait_until_sent(tty, info->timeout);
+	if (port->flags & ASYNC_INITIALIZED)
+		mgslpc_wait_until_sent(tty, info->timeout);
 
 	mgslpc_flush_buffer(tty);
 
@@ -2357,7 +2364,7 @@
 	tty_port_tty_set(port, NULL);
 cleanup:
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
+		printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
 			tty->driver->name, port->count);
 }
 
@@ -2368,12 +2375,12 @@
 	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
 	unsigned long orig_jiffies, char_time;
 
-	if (!info )
+	if (!info)
 		return;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent"))
 		return;
@@ -2389,8 +2396,8 @@
 	 * Note: use tight timings here to satisfy the NIST-PCTS.
 	 */
 
-	if ( info->params.data_rate ) {
-	       	char_time = info->timeout/(32 * 5);
+	if (info->params.data_rate) {
+	     	char_time = info->timeout/(32 * 5);
 		if (!char_time)
 			char_time++;
 	} else
@@ -2421,7 +2428,7 @@
 exit:
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 }
 
 /* Called by tty_hangup() when a hangup is signaled.
@@ -2433,7 +2440,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_hangup(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup"))
 		return;
@@ -2448,9 +2455,9 @@
 	MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->lock,flags);
- 	get_signals(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	if (info->serial_signals & SerialSignal_DCD)
 		return 1;
@@ -2462,13 +2469,13 @@
 	MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port);
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (onoff)
 		info->serial_signals |= SerialSignal_RTS | SerialSignal_DTR;
 	else
 		info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
 	set_signals(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 }
 
 
@@ -2476,14 +2483,14 @@
 {
 	MGSLPC_INFO	*info;
 	struct tty_port *port;
-	int 			retval, line;
-	unsigned long flags;
+	int		retval, line;
+	unsigned long	flags;
 
 	/* verify range of specified line number */
 	line = tty->index;
 	if (line >= mgslpc_device_count) {
 		printk("%s(%d):mgslpc_open with invalid line #%d.\n",
-			__FILE__,__LINE__,line);
+			__FILE__, __LINE__, line);
 		return -ENODEV;
 	}
 
@@ -2500,7 +2507,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
-			 __FILE__,__LINE__,tty->driver->name, port->count);
+			 __FILE__, __LINE__, tty->driver->name, port->count);
 
 	/* If port is closing, signal caller to try again */
 	if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
@@ -2535,13 +2542,13 @@
 	if (retval) {
 		if (debug_level >= DEBUG_LEVEL_INFO)
 			printk("%s(%d):block_til_ready(%s) returned %d\n",
-				 __FILE__,__LINE__, info->device_name, retval);
+				 __FILE__, __LINE__, info->device_name, retval);
 		goto cleanup;
 	}
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):mgslpc_open(%s) success\n",
-			 __FILE__,__LINE__, info->device_name);
+			 __FILE__, __LINE__, info->device_name);
 	retval = 0;
 
 cleanup:
@@ -2561,9 +2568,9 @@
 		      info->device_name, info->io_base, info->irq_level);
 
 	/* output current serial signal states */
-	spin_lock_irqsave(&info->lock,flags);
- 	get_signals(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
+	get_signals(info);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	stat_buf[0] = 0;
 	stat_buf[1] = 0;
@@ -2625,7 +2632,7 @@
 	seq_printf(m, "synclink driver:%s\n", driver_version);
 
 	info = mgslpc_device_list;
-	while( info ) {
+	while (info) {
 		line_info(m, info);
 		info = info->next_device;
 	}
@@ -2686,8 +2693,8 @@
 
 static int claim_resources(MGSLPC_INFO *info)
 {
-	if (rx_alloc_buffers(info) < 0 ) {
-		printk( "Can't allocate rx buffer %s\n", info->device_name);
+	if (rx_alloc_buffers(info) < 0) {
+		printk("Can't allocate rx buffer %s\n", info->device_name);
 		release_resources(info);
 		return -ENODEV;
 	}
@@ -2706,8 +2713,12 @@
  *
  * Arguments:		info	pointer to device instance data
  */
-static void mgslpc_add_device(MGSLPC_INFO *info)
+static int mgslpc_add_device(MGSLPC_INFO *info)
 {
+	MGSLPC_INFO *current_dev = NULL;
+	struct device *tty_dev;
+	int ret;
+
 	info->next_device = NULL;
 	info->line = mgslpc_device_count;
 	sprintf(info->device_name,"ttySLP%d",info->line);
@@ -2722,8 +2733,8 @@
 	if (!mgslpc_device_list)
 		mgslpc_device_list = info;
 	else {
-		MGSLPC_INFO *current_dev = mgslpc_device_list;
-		while( current_dev->next_device )
+		current_dev = mgslpc_device_list;
+		while (current_dev->next_device)
 			current_dev = current_dev->next_device;
 		current_dev->next_device = info;
 	}
@@ -2733,14 +2744,34 @@
 	else if (info->max_frame_size > 65535)
 		info->max_frame_size = 65535;
 
-	printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
+	printk("SyncLink PC Card %s:IO=%04X IRQ=%d\n",
 		info->device_name, info->io_base, info->irq_level);
 
 #if SYNCLINK_GENERIC_HDLC
-	hdlcdev_init(info);
+	ret = hdlcdev_init(info);
+	if (ret != 0)
+		goto failed;
 #endif
-	tty_port_register_device(&info->port, serial_driver, info->line,
+
+	tty_dev = tty_port_register_device(&info->port, serial_driver, info->line,
 			&info->p_dev->dev);
+	if (IS_ERR(tty_dev)) {
+		ret = PTR_ERR(tty_dev);
+#if SYNCLINK_GENERIC_HDLC
+		hdlcdev_exit(info);
+#endif
+		goto failed;
+	}
+
+	return 0;
+
+failed:
+	if (current_dev)
+		current_dev->next_device = NULL;
+	else
+		mgslpc_device_list = NULL;
+	mgslpc_device_count--;
+	return ret;
 }
 
 static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
@@ -3262,7 +3293,7 @@
 {
 	if (debug_level >= DEBUG_LEVEL_ISR)
 		printk("%s(%d):rx_stop(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	/* MODE:03 RAC Receiver Active, 0=inactive */
 	clear_reg_bits(info, CHA + MODE, BIT3);
@@ -3275,7 +3306,7 @@
 {
 	if (debug_level >= DEBUG_LEVEL_ISR)
 		printk("%s(%d):rx_start(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	rx_reset_buffers(info);
 	info->rx_enabled = false;
@@ -3291,7 +3322,7 @@
 {
 	if (debug_level >= DEBUG_LEVEL_ISR)
 		printk("%s(%d):tx_start(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	if (info->tx_count) {
 		/* If auto RTS enabled and RTS is inactive, then assert */
@@ -3329,7 +3360,7 @@
 {
 	if (debug_level >= DEBUG_LEVEL_ISR)
 		printk("%s(%d):tx_stop(%s)\n",
-			 __FILE__,__LINE__, info->device_name );
+			 __FILE__, __LINE__, info->device_name);
 
 	del_timer(&info->tx_timer);
 
@@ -3681,7 +3712,7 @@
 
 	if (debug_level >= DEBUG_LEVEL_BH)
 		printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n",
-			__FILE__,__LINE__,info->device_name,status,framesize);
+			__FILE__, __LINE__, info->device_name, status, framesize);
 
 	if (debug_level >= DEBUG_LEVEL_DATA)
 		trace_block(info, buf->data, framesize, 0);
@@ -3709,13 +3740,13 @@
 		}
 	}
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	buf->status = buf->count = 0;
 	info->rx_frame_count--;
 	info->rx_get++;
 	if (info->rx_get >= info->rx_buf_count)
 		info->rx_get = 0;
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	return true;
 }
@@ -3729,7 +3760,7 @@
 	bool rc = true;
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	reset_device(info);
 
 	for (i = 0; i < count; i++) {
@@ -3742,7 +3773,7 @@
 		}
 	}
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 	return rc;
 }
 
@@ -3751,7 +3782,7 @@
 	unsigned long end_time;
 	unsigned long flags;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	reset_device(info);
 
 	info->testing_irq = true;
@@ -3765,7 +3796,7 @@
 	write_reg(info, CHA + TIMR, 0);	/* 512 cycles */
 	issue_command(info, CHA, CMD_START_TIMER);
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	end_time=100;
 	while(end_time-- && !info->irq_occurred) {
@@ -3774,9 +3805,9 @@
 
 	info->testing_irq = false;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	reset_device(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	return info->irq_occurred;
 }
@@ -3785,21 +3816,21 @@
 {
 	if (!register_test(info)) {
 		info->init_error = DiagStatus_AddressFailure;
-		printk( "%s(%d):Register test failure for device %s Addr=%04X\n",
-			__FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) );
+		printk("%s(%d):Register test failure for device %s Addr=%04X\n",
+			__FILE__, __LINE__, info->device_name, (unsigned short)(info->io_base));
 		return -ENODEV;
 	}
 
 	if (!irq_test(info)) {
 		info->init_error = DiagStatus_IrqFailure;
-		printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
-			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
+		printk("%s(%d):Interrupt test failure for device %s IRQ=%d\n",
+			__FILE__, __LINE__, info->device_name, (unsigned short)(info->irq_level));
 		return -ENODEV;
 	}
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
 		printk("%s(%d):device %s passed diagnostics\n",
-			__FILE__,__LINE__,info->device_name);
+			__FILE__, __LINE__, info->device_name);
 	return 0;
 }
 
@@ -3808,9 +3839,9 @@
 	int i;
 	int linecount;
 	if (xmit)
-		printk("%s tx data:\n",info->device_name);
+		printk("%s tx data:\n", info->device_name);
 	else
-		printk("%s rx data:\n",info->device_name);
+		printk("%s rx data:\n", info->device_name);
 
 	while(count) {
 		if (count > 16)
@@ -3819,12 +3850,12 @@
 			linecount = count;
 
 		for(i=0;i<linecount;i++)
-			printk("%02X ",(unsigned char)data[i]);
+			printk("%02X ", (unsigned char)data[i]);
 		for(;i<17;i++)
 			printk("   ");
 		for(i=0;i<linecount;i++) {
 			if (data[i]>=040 && data[i]<=0176)
-				printk("%c",data[i]);
+				printk("%c", data[i]);
 			else
 				printk(".");
 		}
@@ -3843,18 +3874,18 @@
 	MGSLPC_INFO *info = (MGSLPC_INFO*)context;
 	unsigned long flags;
 
-	if ( debug_level >= DEBUG_LEVEL_INFO )
-		printk( "%s(%d):tx_timeout(%s)\n",
-			__FILE__,__LINE__,info->device_name);
-	if(info->tx_active &&
-	   info->params.mode == MGSL_MODE_HDLC) {
+	if (debug_level >= DEBUG_LEVEL_INFO)
+		printk("%s(%d):tx_timeout(%s)\n",
+			__FILE__, __LINE__, info->device_name);
+	if (info->tx_active &&
+	    info->params.mode == MGSL_MODE_HDLC) {
 		info->icount.txtimeout++;
 	}
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	info->tx_active = false;
 	info->tx_count = info->tx_put = info->tx_get = 0;
 
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 #if SYNCLINK_GENERIC_HDLC
 	if (info->netcount)
@@ -3936,7 +3967,7 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name);
+		printk(KERN_INFO "%s:hdlc_xmit(%s)\n", __FILE__, dev->name);
 
 	/* stop sending until this frame completes */
 	netif_stop_queue(dev);
@@ -3957,13 +3988,13 @@
 	dev->trans_start = jiffies;
 
 	/* start hardware transmitter if necessary */
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	if (!info->tx_active) {
 		struct tty_struct *tty = tty_port_tty_get(&info->port);
-	 	tx_start(info, tty);
-	 	tty_kref_put(tty);
+		tx_start(info, tty);
+		tty_kref_put(tty);
 	}
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	return NETDEV_TX_OK;
 }
@@ -3984,10 +4015,11 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name);
+		printk("%s:hdlcdev_open(%s)\n", __FILE__, dev->name);
 
 	/* generic HDLC layer open processing */
-	if ((rc = hdlc_open(dev)))
+	rc = hdlc_open(dev);
+	if (rc != 0)
 		return rc;
 
 	/* arbitrate between network and tty opens */
@@ -4002,7 +4034,8 @@
 
 	tty = tty_port_tty_get(&info->port);
 	/* claim resources and init adapter */
-	if ((rc = startup(info, tty)) != 0) {
+	rc = startup(info, tty);
+	if (rc != 0) {
 		tty_kref_put(tty);
 		spin_lock_irqsave(&info->netlock, flags);
 		info->netcount=0;
@@ -4044,7 +4077,7 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name);
+		printk("%s:hdlcdev_close(%s)\n", __FILE__, dev->name);
 
 	netif_stop_queue(dev);
 
@@ -4078,7 +4111,7 @@
 	unsigned int flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
+		printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
 
 	/* return error if TTY interface open */
 	if (info->port.count)
@@ -4179,14 +4212,14 @@
 	unsigned long flags;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("hdlcdev_tx_timeout(%s)\n",dev->name);
+		printk("hdlcdev_tx_timeout(%s)\n", dev->name);
 
 	dev->stats.tx_errors++;
 	dev->stats.tx_aborted_errors++;
 
-	spin_lock_irqsave(&info->lock,flags);
+	spin_lock_irqsave(&info->lock, flags);
 	tx_stop(info);
-	spin_unlock_irqrestore(&info->lock,flags);
+	spin_unlock_irqrestore(&info->lock, flags);
 
 	netif_wake_queue(dev);
 }
@@ -4217,7 +4250,7 @@
 	struct net_device *dev = info->netdev;
 
 	if (debug_level >= DEBUG_LEVEL_INFO)
-		printk("hdlcdev_rx(%s)\n",dev->name);
+		printk("hdlcdev_rx(%s)\n", dev->name);
 
 	if (skb == NULL) {
 		printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name);
@@ -4260,8 +4293,9 @@
 
 	/* allocate and initialize network and HDLC layer objects */
 
-	if (!(dev = alloc_hdlcdev(info))) {
-		printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__);
+	dev = alloc_hdlcdev(info);
+	if (dev == NULL) {
+		printk(KERN_ERR "%s:hdlc device allocation failure\n", __FILE__);
 		return -ENOMEM;
 	}
 
@@ -4280,8 +4314,9 @@
 	hdlc->xmit   = hdlcdev_xmit;
 
 	/* register objects with HDLC layer */
-	if ((rc = register_hdlc_device(dev))) {
-		printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__);
+	rc = register_hdlc_device(dev);
+	if (rc) {
+		printk(KERN_WARNING "%s:unable to register hdlc device\n", __FILE__);
 		free_netdev(dev);
 		return rc;
 	}
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 07122a9..5168a13 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -29,7 +29,7 @@
 
 config EXTCON_MAX77693
 	tristate "MAX77693 EXTCON Support"
-	depends on MFD_MAX77693
+	depends on MFD_MAX77693 && INPUT
 	select IRQ_DOMAIN
 	select REGMAP_I2C
 	help
@@ -47,7 +47,7 @@
 
 config EXTCON_ARIZONA
 	tristate "Wolfson Arizona EXTCON support"
-	depends on MFD_ARIZONA && INPUT
+	depends on MFD_ARIZONA && INPUT && SND_SOC
 	help
 	  Say Y here to enable support for external accessory detection
 	  with Wolfson Arizona devices. These are audio CODECs with
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 414aed5..dc357a4 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -27,12 +27,18 @@
 #include <linux/regulator/consumer.h>
 #include <linux/extcon.h>
 
+#include <sound/soc.h>
+
 #include <linux/mfd/arizona/core.h>
 #include <linux/mfd/arizona/pdata.h>
 #include <linux/mfd/arizona/registers.h>
 
 #define ARIZONA_NUM_BUTTONS 6
 
+#define ARIZONA_ACCDET_MODE_MIC 0
+#define ARIZONA_ACCDET_MODE_HPL 1
+#define ARIZONA_ACCDET_MODE_HPR 2
+
 struct arizona_extcon_info {
 	struct device *dev;
 	struct arizona *arizona;
@@ -45,17 +51,28 @@
 	int micd_num_modes;
 
 	bool micd_reva;
+	bool micd_clamp;
+
+	struct delayed_work hpdet_work;
+
+	bool hpdet_active;
+	bool hpdet_done;
+
+	int num_hpdet_res;
+	unsigned int hpdet_res[3];
 
 	bool mic;
 	bool detecting;
 	int jack_flips;
 
+	int hpdet_ip;
+
 	struct extcon_dev edev;
 };
 
 static const struct arizona_micd_config micd_default_modes[] = {
-	{ ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
 	{ 0,                  2 << ARIZONA_MICD_BIAS_SRC_SHIFT, 1 },
+	{ ARIZONA_ACCDET_SRC, 1 << ARIZONA_MICD_BIAS_SRC_SHIFT, 0 },
 };
 
 static struct {
@@ -73,11 +90,13 @@
 #define ARIZONA_CABLE_MECHANICAL 0
 #define ARIZONA_CABLE_MICROPHONE 1
 #define ARIZONA_CABLE_HEADPHONE  2
+#define ARIZONA_CABLE_LINEOUT    3
 
 static const char *arizona_cable[] = {
 	"Mechanical",
 	"Microphone",
 	"Headphone",
+	"Line-out",
 	NULL,
 };
 
@@ -85,8 +104,9 @@
 {
 	struct arizona *arizona = info->arizona;
 
-	gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
-				info->micd_modes[mode].gpio);
+	if (arizona->pdata.micd_pol_gpio > 0)
+		gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
+					info->micd_modes[mode].gpio);
 	regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 			   ARIZONA_MICD_BIAS_SRC_MASK,
 			   info->micd_modes[mode].bias);
@@ -98,19 +118,70 @@
 	dev_dbg(arizona->dev, "Set jack polarity to %d\n", mode);
 }
 
+static const char *arizona_extcon_get_micbias(struct arizona_extcon_info *info)
+{
+	switch (info->micd_modes[0].bias >> ARIZONA_MICD_BIAS_SRC_SHIFT) {
+	case 1:
+		return "MICBIAS1";
+	case 2:
+		return "MICBIAS2";
+	case 3:
+		return "MICBIAS3";
+	default:
+		return "MICVDD";
+	}
+}
+
+static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
+{
+	struct arizona *arizona = info->arizona;
+	const char *widget = arizona_extcon_get_micbias(info);
+	struct snd_soc_dapm_context *dapm = arizona->dapm;
+	int ret;
+
+	mutex_lock(&dapm->card->dapm_mutex);
+
+	ret = snd_soc_dapm_force_enable_pin(dapm, widget);
+	if (ret != 0)
+		dev_warn(arizona->dev, "Failed to enable %s: %d\n",
+			 widget, ret);
+
+	mutex_unlock(&dapm->card->dapm_mutex);
+
+	snd_soc_dapm_sync(dapm);
+
+	if (!arizona->pdata.micd_force_micbias) {
+		mutex_lock(&dapm->card->dapm_mutex);
+
+		ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
+		if (ret != 0)
+			dev_warn(arizona->dev, "Failed to disable %s: %d\n",
+				 widget, ret);
+
+		mutex_unlock(&dapm->card->dapm_mutex);
+
+		snd_soc_dapm_sync(dapm);
+	}
+}
+
 static void arizona_start_mic(struct arizona_extcon_info *info)
 {
 	struct arizona *arizona = info->arizona;
 	bool change;
 	int ret;
 
-	info->detecting = true;
-	info->mic = false;
-	info->jack_flips = 0;
-
 	/* Microphone detection can't use idle mode */
 	pm_runtime_get(info->dev);
 
+	if (info->detecting) {
+		ret = regulator_allow_bypass(info->micvdd, false);
+		if (ret != 0) {
+			dev_err(arizona->dev,
+				"Failed to regulate MICVDD: %d\n",
+				ret);
+		}
+	}
+
 	ret = regulator_enable(info->micvdd);
 	if (ret != 0) {
 		dev_err(arizona->dev, "Failed to enable MICVDD: %d\n",
@@ -123,6 +194,12 @@
 		regmap_write(arizona->regmap, 0x80, 0x0);
 	}
 
+	regmap_update_bits(arizona->regmap,
+			   ARIZONA_ACCESSORY_DETECT_MODE_1,
+			   ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+	arizona_extcon_pulse_micbias(info);
+
 	regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
 				 ARIZONA_MICD_ENA, ARIZONA_MICD_ENA,
 				 &change);
@@ -135,18 +212,39 @@
 static void arizona_stop_mic(struct arizona_extcon_info *info)
 {
 	struct arizona *arizona = info->arizona;
+	const char *widget = arizona_extcon_get_micbias(info);
+	struct snd_soc_dapm_context *dapm = arizona->dapm;
 	bool change;
+	int ret;
 
 	regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
 				 ARIZONA_MICD_ENA, 0,
 				 &change);
 
+	mutex_lock(&dapm->card->dapm_mutex);
+
+	ret = snd_soc_dapm_disable_pin(dapm, widget);
+	if (ret != 0)
+		dev_warn(arizona->dev,
+			 "Failed to disable %s: %d\n",
+			 widget, ret);
+
+	mutex_unlock(&dapm->card->dapm_mutex);
+
+	snd_soc_dapm_sync(dapm);
+
 	if (info->micd_reva) {
 		regmap_write(arizona->regmap, 0x80, 0x3);
 		regmap_write(arizona->regmap, 0x294, 2);
 		regmap_write(arizona->regmap, 0x80, 0x0);
 	}
 
+	ret = regulator_allow_bypass(info->micvdd, true);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+			ret);
+	}
+
 	if (change) {
 		regulator_disable(info->micvdd);
 		pm_runtime_mark_last_busy(info->dev);
@@ -154,6 +252,478 @@
 	}
 }
 
+static struct {
+	unsigned int factor_a;
+	unsigned int factor_b;
+} arizona_hpdet_b_ranges[] = {
+	{  5528,   362464 },
+	{ 11084,  6186851 },
+	{ 11065, 65460395 },
+};
+
+static struct {
+	int min;
+	int max;
+} arizona_hpdet_c_ranges[] = {
+	{ 0,       30 },
+	{ 8,      100 },
+	{ 100,   1000 },
+	{ 1000, 10000 },
+};
+
+static int arizona_hpdet_read(struct arizona_extcon_info *info)
+{
+	struct arizona *arizona = info->arizona;
+	unsigned int val, range;
+	int ret;
+
+	ret = regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_2, &val);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Failed to read HPDET status: %d\n",
+			ret);
+		return ret;
+	}
+
+	switch (info->hpdet_ip) {
+	case 0:
+		if (!(val & ARIZONA_HP_DONE)) {
+			dev_err(arizona->dev, "HPDET did not complete: %x\n",
+				val);
+			return -EAGAIN;
+		}
+
+		val &= ARIZONA_HP_LVL_MASK;
+		break;
+
+	case 1:
+		if (!(val & ARIZONA_HP_DONE_B)) {
+			dev_err(arizona->dev, "HPDET did not complete: %x\n",
+				val);
+			return -EAGAIN;
+		}
+
+		ret = regmap_read(arizona->regmap, ARIZONA_HP_DACVAL, &val);
+		if (ret != 0) {
+			dev_err(arizona->dev, "Failed to read HP value: %d\n",
+				ret);
+			return -EAGAIN;
+		}
+
+		regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+			    &range);
+		range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+			   >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+		if (range < ARRAY_SIZE(arizona_hpdet_b_ranges) - 1 &&
+		    (val < 100 || val > 0x3fb)) {
+			range++;
+			dev_dbg(arizona->dev, "Moving to HPDET range %d\n",
+				range);
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_HEADPHONE_DETECT_1,
+					   ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+					   range <<
+					   ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+			return -EAGAIN;
+		}
+
+		/* If we go out of range report top of range */
+		if (val < 100 || val > 0x3fb) {
+			dev_dbg(arizona->dev, "Measurement out of range\n");
+			return 10000;
+		}
+
+		dev_dbg(arizona->dev, "HPDET read %d in range %d\n",
+			val, range);
+
+		val = arizona_hpdet_b_ranges[range].factor_b
+			/ ((val * 100) -
+			   arizona_hpdet_b_ranges[range].factor_a);
+		break;
+
+	default:
+		dev_warn(arizona->dev, "Unknown HPDET IP revision %d\n",
+			 info->hpdet_ip);
+	case 2:
+		if (!(val & ARIZONA_HP_DONE_B)) {
+			dev_err(arizona->dev, "HPDET did not complete: %x\n",
+				val);
+			return -EAGAIN;
+		}
+
+		val &= ARIZONA_HP_LVL_B_MASK;
+
+		regmap_read(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+			    &range);
+		range = (range & ARIZONA_HP_IMPEDANCE_RANGE_MASK)
+			   >> ARIZONA_HP_IMPEDANCE_RANGE_SHIFT;
+
+		/* Skip up or down a range? */
+		if (range && (val < arizona_hpdet_c_ranges[range].min)) {
+			range--;
+			dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+				arizona_hpdet_c_ranges[range].min,
+				arizona_hpdet_c_ranges[range].max);
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_HEADPHONE_DETECT_1,
+					   ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+					   range <<
+					   ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+			return -EAGAIN;
+		}
+
+		if (range < ARRAY_SIZE(arizona_hpdet_c_ranges) - 1 &&
+		    (val >= arizona_hpdet_c_ranges[range].max)) {
+			range++;
+			dev_dbg(arizona->dev, "Moving to HPDET range %d-%d\n",
+				arizona_hpdet_c_ranges[range].min,
+				arizona_hpdet_c_ranges[range].max);
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_HEADPHONE_DETECT_1,
+					   ARIZONA_HP_IMPEDANCE_RANGE_MASK,
+					   range <<
+					   ARIZONA_HP_IMPEDANCE_RANGE_SHIFT);
+			return -EAGAIN;
+		}
+	}
+
+	dev_dbg(arizona->dev, "HP impedance %d ohms\n", val);
+	return val;
+}
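
As a standalone illustration of the conversion arizona_hpdet_read() performs for HPDET IP revision 1 (factor_b divided by (DAC value * 100 - factor_a) for the current range, with out-of-range readings clamped to 10 kohm), here is a minimal user-space sketch. The table values and the 100..0x3fb window are taken from the code above; the helper names are invented.

#include <stdio.h>

/* Same coefficients as arizona_hpdet_b_ranges[] in the patch above. */
static const struct {
	unsigned int factor_a;
	unsigned int factor_b;
} b_ranges[] = {
	{  5528,   362464 },
	{ 11084,  6186851 },
	{ 11065, 65460395 },
};

/* Readings outside 100..0x3fb are reported as top of range (10 kohm). */
static unsigned int hpdet_b_to_ohms(unsigned int range, unsigned int dacval)
{
	if (dacval < 100 || dacval > 0x3fb)
		return 10000;
	return b_ranges[range].factor_b /
	       (dacval * 100 - b_ranges[range].factor_a);
}

int main(void)
{
	unsigned int ohms = hpdet_b_to_ohms(0, 200);	/* hypothetical raw value */

	/* arizona_hpdet_irq() reports >= 5000 ohms as line-out, else headphone */
	printf("%u ohms -> %s\n", ohms, ohms >= 5000 ? "line-out" : "headphone");
	return 0;
}

For range 0 and a raw value of 200 this gives roughly 25 ohms, i.e. an ordinary pair of headphones.
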
+
+static int arizona_hpdet_do_id(struct arizona_extcon_info *info, int *reading)
+{
+	struct arizona *arizona = info->arizona;
+	int id_gpio = arizona->pdata.hpdet_id_gpio;
+
+	/*
+	 * If we're using HPDET for accessory identification we need
+	 * to take multiple measurements; step through them in sequence.
+	 */
+	if (arizona->pdata.hpdet_acc_id) {
+		info->hpdet_res[info->num_hpdet_res++] = *reading;
+
+		/*
+		 * If the impedance is too high, don't measure the
+		 * second ground.
+		 */
+		if (info->num_hpdet_res == 1 && *reading >= 45) {
+			dev_dbg(arizona->dev, "Skipping ground flip\n");
+			info->hpdet_res[info->num_hpdet_res++] = *reading;
+		}
+
+		if (info->num_hpdet_res == 1) {
+			dev_dbg(arizona->dev, "Flipping ground\n");
+
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_ACCESSORY_DETECT_MODE_1,
+					   ARIZONA_ACCDET_SRC,
+					   ~info->micd_modes[0].src);
+
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_HEADPHONE_DETECT_1,
+					   ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+			return -EAGAIN;
+		}
+
+		/* Only check the mic directly if we didn't already ID it */
+		if (id_gpio && info->num_hpdet_res == 2 &&
+		    !((info->hpdet_res[0] > info->hpdet_res[1] * 2))) {
+			dev_dbg(arizona->dev, "Measuring mic\n");
+
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_ACCESSORY_DETECT_MODE_1,
+					   ARIZONA_ACCDET_MODE_MASK |
+					   ARIZONA_ACCDET_SRC,
+					   ARIZONA_ACCDET_MODE_HPR |
+					   info->micd_modes[0].src);
+
+			gpio_set_value_cansleep(id_gpio, 1);
+
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_HEADPHONE_DETECT_1,
+					   ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+			return -EAGAIN;
+		}
+
+		/* OK, got both.  Now, compare... */
+		dev_dbg(arizona->dev, "HPDET measured %d %d %d\n",
+			info->hpdet_res[0], info->hpdet_res[1],
+			info->hpdet_res[2]);
+
+		/* Take the headphone impedance for the main report */
+		*reading = info->hpdet_res[0];
+
+		/*
+		 * Either the two grounds measure differently or we
+		 * measure the mic as high impedance.
+		 */
+		if ((info->hpdet_res[0] > info->hpdet_res[1] * 2) ||
+		    (id_gpio && info->hpdet_res[2] > 10)) {
+			dev_dbg(arizona->dev, "Detected mic\n");
+			info->mic = true;
+			info->detecting = true;
+		} else {
+			dev_dbg(arizona->dev, "Detected headphone\n");
+		}
+
+		/* Make sure everything is reset back to the real polarity */
+		regmap_update_bits(arizona->regmap,
+				   ARIZONA_ACCESSORY_DETECT_MODE_1,
+				   ARIZONA_ACCDET_SRC,
+				   info->micd_modes[0].src);
+	}
+
+	return 0;
+}
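
The decision arizona_hpdet_do_id() feeds into, once all the measurements are collected, boils down to the comparison below. This is a minimal standalone sketch of the same test used in the code above (mismatched grounds or a high-impedance mic measurement means a microphone is present); the function name and the sample readings are invented.

#include <stdio.h>

/*
 * res[0]/res[1]: impedances measured with the two ground polarities,
 * res[2]: the optional mic measurement taken when an ID GPIO is wired up.
 */
static int looks_like_mic(const unsigned int res[3], int have_id_gpio)
{
	return res[0] > res[1] * 2 || (have_id_gpio && res[2] > 10);
}

int main(void)
{
	unsigned int headset[3]    = { 33, 33, 480 };	/* hypothetical readings */
	unsigned int headphones[3] = { 32, 31,   2 };

	printf("headset: %d, headphones: %d\n",
	       looks_like_mic(headset, 1), looks_like_mic(headphones, 1));
	return 0;
}
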
+
+static irqreturn_t arizona_hpdet_irq(int irq, void *data)
+{
+	struct arizona_extcon_info *info = data;
+	struct arizona *arizona = info->arizona;
+	int id_gpio = arizona->pdata.hpdet_id_gpio;
+	int report = ARIZONA_CABLE_HEADPHONE;
+	unsigned int val;
+	int ret, reading;
+
+	mutex_lock(&info->lock);
+
+	/* If we got a spurious IRQ for some reason then ignore it */
+	if (!info->hpdet_active) {
+		dev_warn(arizona->dev, "Spurious HPDET IRQ\n");
+		mutex_unlock(&info->lock);
+		return IRQ_NONE;
+	}
+
+	/* If the cable was removed while measuring ignore the result */
+	ret = extcon_get_cable_state_(&info->edev, ARIZONA_CABLE_MECHANICAL);
+	if (ret < 0) {
+		dev_err(arizona->dev, "Failed to check cable state: %d\n",
+			ret);
+		goto out;
+	} else if (!ret) {
+		dev_dbg(arizona->dev, "Ignoring HPDET for removed cable\n");
+		goto done;
+	}
+
+	ret = arizona_hpdet_read(info);
+	if (ret == -EAGAIN) {
+		goto out;
+	} else if (ret < 0) {
+		goto done;
+	}
+	reading = ret;
+
+	/* Reset back to starting range */
+	regmap_update_bits(arizona->regmap,
+			   ARIZONA_HEADPHONE_DETECT_1,
+			   ARIZONA_HP_IMPEDANCE_RANGE_MASK | ARIZONA_HP_POLL,
+			   0);
+
+	ret = arizona_hpdet_do_id(info, &reading);
+	if (ret == -EAGAIN) {
+		goto out;
+	} else if (ret < 0) {
+		goto done;
+	}
+
+	/* Report high impedance cables as line outputs */
+	if (reading >= 5000)
+		report = ARIZONA_CABLE_LINEOUT;
+	else
+		report = ARIZONA_CABLE_HEADPHONE;
+
+	ret = extcon_set_cable_state_(&info->edev, report, true);
+	if (ret != 0)
+		dev_err(arizona->dev, "Failed to report HP/line: %d\n",
+			ret);
+
+	mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+	ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Failed to read output enables: %d\n",
+			ret);
+		val = 0;
+	}
+
+	if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+		ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0);
+		if (ret != 0)
+			dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+				 ret);
+
+		ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0);
+		if (ret != 0)
+			dev_warn(arizona->dev, "Failed to undo magic: %d\n",
+				 ret);
+	}
+
+	mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+done:
+	if (id_gpio)
+		gpio_set_value_cansleep(id_gpio, 0);
+
+	/* Revert back to MICDET mode */
+	regmap_update_bits(arizona->regmap,
+			   ARIZONA_ACCESSORY_DETECT_MODE_1,
+			   ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+	/* If we have a mic then reenable MICDET */
+	if (info->mic)
+		arizona_start_mic(info);
+
+	if (info->hpdet_active) {
+		pm_runtime_put_autosuspend(info->dev);
+		info->hpdet_active = false;
+	}
+
+	info->hpdet_done = true;
+
+out:
+	mutex_unlock(&info->lock);
+
+	return IRQ_HANDLED;
+}
+
+static void arizona_identify_headphone(struct arizona_extcon_info *info)
+{
+	struct arizona *arizona = info->arizona;
+	int ret;
+
+	if (info->hpdet_done)
+		return;
+
+	dev_dbg(arizona->dev, "Starting HPDET\n");
+
+	/* Make sure we keep the device enabled during the measurement */
+	pm_runtime_get(info->dev);
+
+	info->hpdet_active = true;
+
+	if (info->mic)
+		arizona_stop_mic(info);
+
+	ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000, 0x4000);
+	if (ret != 0)
+		dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+	ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000, 0x4000);
+	if (ret != 0)
+		dev_warn(arizona->dev, "Failed to do magic: %d\n", ret);
+
+	ret = regmap_update_bits(arizona->regmap,
+				 ARIZONA_ACCESSORY_DETECT_MODE_1,
+				 ARIZONA_ACCDET_MODE_MASK,
+				 ARIZONA_ACCDET_MODE_HPL);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+		goto err;
+	}
+
+	ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+				 ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+			ret);
+		goto err;
+	}
+
+	return;
+
+err:
+	regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+			   ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+	/* Just report headphone */
+	ret = extcon_update_state(&info->edev,
+				  1 << ARIZONA_CABLE_HEADPHONE,
+				  1 << ARIZONA_CABLE_HEADPHONE);
+	if (ret != 0)
+		dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+	if (info->mic)
+		arizona_start_mic(info);
+
+	info->hpdet_active = false;
+}
+
+static void arizona_start_hpdet_acc_id(struct arizona_extcon_info *info)
+{
+	struct arizona *arizona = info->arizona;
+	unsigned int val;
+	int ret;
+
+	dev_dbg(arizona->dev, "Starting identification via HPDET\n");
+
+	/* Make sure we keep the device enabled during the measurement */
+	pm_runtime_get_sync(info->dev);
+
+	info->hpdet_active = true;
+
+	arizona_extcon_pulse_micbias(info);
+
+	mutex_lock(&arizona->dapm->card->dapm_mutex);
+
+	ret = regmap_read(arizona->regmap, ARIZONA_OUTPUT_ENABLES_1, &val);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Failed to read output enables: %d\n",
+			ret);
+		val = 0;
+	}
+
+	if (!(val & (ARIZONA_OUT1L_ENA | ARIZONA_OUT1R_ENA))) {
+		ret = regmap_update_bits(arizona->regmap, 0x225, 0x4000,
+					 0x4000);
+		if (ret != 0)
+			dev_warn(arizona->dev, "Failed to do magic: %d\n",
+				 ret);
+
+		ret = regmap_update_bits(arizona->regmap, 0x226, 0x4000,
+					 0x4000);
+		if (ret != 0)
+			dev_warn(arizona->dev, "Failed to do magic: %d\n",
+				 ret);
+	}
+
+	mutex_unlock(&arizona->dapm->card->dapm_mutex);
+
+	ret = regmap_update_bits(arizona->regmap,
+				 ARIZONA_ACCESSORY_DETECT_MODE_1,
+				 ARIZONA_ACCDET_SRC | ARIZONA_ACCDET_MODE_MASK,
+				 info->micd_modes[0].src |
+				 ARIZONA_ACCDET_MODE_HPL);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Failed to set HPDETL mode: %d\n", ret);
+		goto err;
+	}
+
+	ret = regmap_update_bits(arizona->regmap, ARIZONA_HEADPHONE_DETECT_1,
+				 ARIZONA_HP_POLL, ARIZONA_HP_POLL);
+	if (ret != 0) {
+		dev_err(arizona->dev, "Can't start HPDETL measurement: %d\n",
+			ret);
+		goto err;
+	}
+
+	return;
+
+err:
+	regmap_update_bits(arizona->regmap, ARIZONA_ACCESSORY_DETECT_MODE_1,
+			   ARIZONA_ACCDET_MODE_MASK, ARIZONA_ACCDET_MODE_MIC);
+
+	/* Just report headphone */
+	ret = extcon_update_state(&info->edev,
+				  1 << ARIZONA_CABLE_HEADPHONE,
+				  1 << ARIZONA_CABLE_HEADPHONE);
+	if (ret != 0)
+		dev_err(arizona->dev, "Failed to report headphone: %d\n", ret);
+
+	info->hpdet_active = false;
+}
+
 static irqreturn_t arizona_micdet(int irq, void *data)
 {
 	struct arizona_extcon_info *info = data;
@@ -187,16 +757,23 @@
 
 	/* If we got a high impedance we should have a headset, report it. */
 	if (info->detecting && (val & 0x400)) {
+		arizona_identify_headphone(info);
+
 		ret = extcon_update_state(&info->edev,
-					  1 << ARIZONA_CABLE_MICROPHONE |
-					  1 << ARIZONA_CABLE_HEADPHONE,
-					  1 << ARIZONA_CABLE_MICROPHONE |
-					  1 << ARIZONA_CABLE_HEADPHONE);
+					  1 << ARIZONA_CABLE_MICROPHONE,
+					  1 << ARIZONA_CABLE_MICROPHONE);
 
 		if (ret != 0)
 			dev_err(arizona->dev, "Headset report failed: %d\n",
 				ret);
 
+		/* Don't need to regulate for button detection */
+		ret = regulator_allow_bypass(info->micvdd, false);
+		if (ret != 0) {
+			dev_err(arizona->dev, "Failed to bypass MICVDD: %d\n",
+				ret);
+		}
+
 		info->mic = true;
 		info->detecting = false;
 		goto handled;
@@ -209,20 +786,13 @@
 	 * impedance then give up and report headphones.
 	 */
 	if (info->detecting && (val & 0x3f8)) {
-		info->jack_flips++;
-
 		if (info->jack_flips >= info->micd_num_modes) {
-			dev_dbg(arizona->dev, "Detected headphone\n");
-			info->detecting = false;
-			arizona_stop_mic(info);
+			dev_dbg(arizona->dev, "Detected HP/line\n");
+			arizona_identify_headphone(info);
 
-			ret = extcon_set_cable_state_(&info->edev,
-						      ARIZONA_CABLE_HEADPHONE,
-						      true);
-			if (ret != 0)
-				dev_err(arizona->dev,
-					"Headphone report failed: %d\n",
-				ret);
+			info->detecting = false;
+
+			arizona_stop_mic(info);
 		} else {
 			info->micd_mode++;
 			if (info->micd_mode == info->micd_num_modes)
@@ -258,13 +828,7 @@
 			info->detecting = false;
 			arizona_stop_mic(info);
 
-			ret = extcon_set_cable_state_(&info->edev,
-						      ARIZONA_CABLE_HEADPHONE,
-						      true);
-			if (ret != 0)
-				dev_err(arizona->dev,
-					"Headphone report failed: %d\n",
-				ret);
+			arizona_identify_headphone(info);
 		} else {
 			dev_warn(arizona->dev, "Button with no mic: %x\n",
 				 val);
@@ -275,6 +839,7 @@
 			input_report_key(info->input,
 					 arizona_lvl_to_key[i].report, 0);
 		input_sync(info->input);
+		arizona_extcon_pulse_micbias(info);
 	}
 
 handled:
@@ -284,17 +849,38 @@
 	return IRQ_HANDLED;
 }
 
+static void arizona_hpdet_work(struct work_struct *work)
+{
+	struct arizona_extcon_info *info = container_of(work,
+							struct arizona_extcon_info,
+							hpdet_work.work);
+
+	mutex_lock(&info->lock);
+	arizona_start_hpdet_acc_id(info);
+	mutex_unlock(&info->lock);
+}
+
 static irqreturn_t arizona_jackdet(int irq, void *data)
 {
 	struct arizona_extcon_info *info = data;
 	struct arizona *arizona = info->arizona;
-	unsigned int val;
+	unsigned int val, present, mask;
 	int ret, i;
 
 	pm_runtime_get_sync(info->dev);
 
+	cancel_delayed_work_sync(&info->hpdet_work);
+
 	mutex_lock(&info->lock);
 
+	if (arizona->pdata.jd_gpio5) {
+		mask = ARIZONA_MICD_CLAMP_STS;
+		present = 0;
+	} else {
+		mask = ARIZONA_JD1_STS;
+		present = ARIZONA_JD1_STS;
+	}
+
 	ret = regmap_read(arizona->regmap, ARIZONA_AOD_IRQ_RAW_STATUS, &val);
 	if (ret != 0) {
 		dev_err(arizona->dev, "Failed to read jackdet status: %d\n",
@@ -304,7 +890,7 @@
 		return IRQ_NONE;
 	}
 
-	if (val & ARIZONA_JD1_STS) {
+	if ((val & mask) == present) {
 		dev_dbg(arizona->dev, "Detected jack\n");
 		ret = extcon_set_cable_state_(&info->edev,
 					      ARIZONA_CABLE_MECHANICAL, true);
@@ -313,12 +899,31 @@
 			dev_err(arizona->dev, "Mechanical report failed: %d\n",
 				ret);
 
-		arizona_start_mic(info);
+		if (!arizona->pdata.hpdet_acc_id) {
+			info->detecting = true;
+			info->mic = false;
+			info->jack_flips = 0;
+
+			arizona_start_mic(info);
+		} else {
+			schedule_delayed_work(&info->hpdet_work,
+					      msecs_to_jiffies(250));
+		}
+
+		regmap_update_bits(arizona->regmap,
+				   ARIZONA_JACK_DETECT_DEBOUNCE,
+				   ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB, 0);
 	} else {
 		dev_dbg(arizona->dev, "Detected jack removal\n");
 
 		arizona_stop_mic(info);
 
+		info->num_hpdet_res = 0;
+		for (i = 0; i < ARRAY_SIZE(info->hpdet_res); i++)
+			info->hpdet_res[i] = 0;
+		info->mic = false;
+		info->hpdet_done = false;
+
 		for (i = 0; i < ARIZONA_NUM_BUTTONS; i++)
 			input_report_key(info->input,
 					 arizona_lvl_to_key[i].report, 0);
@@ -328,8 +933,20 @@
 		if (ret != 0)
 			dev_err(arizona->dev, "Removal report failed: %d\n",
 				ret);
+
+		regmap_update_bits(arizona->regmap,
+				   ARIZONA_JACK_DETECT_DEBOUNCE,
+				   ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB,
+				   ARIZONA_MICD_CLAMP_DB | ARIZONA_JD1_DB);
 	}
 
+	/* Clear trig_sts to make sure DCVDD is not forced up */
+	regmap_write(arizona->regmap, ARIZONA_AOD_WKUP_AND_TRIG,
+		     ARIZONA_MICD_CLAMP_FALL_TRIG_STS |
+		     ARIZONA_MICD_CLAMP_RISE_TRIG_STS |
+		     ARIZONA_JD1_FALL_TRIG_STS |
+		     ARIZONA_JD1_RISE_TRIG_STS);
+
 	mutex_unlock(&info->lock);
 
 	pm_runtime_mark_last_busy(info->dev);
@@ -343,8 +960,12 @@
 	struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
 	struct arizona_pdata *pdata;
 	struct arizona_extcon_info *info;
+	int jack_irq_fall, jack_irq_rise;
 	int ret, mode, i;
 
+	if (!arizona->dapm || !arizona->dapm->card)
+		return -EPROBE_DEFER;
+
 	pdata = dev_get_platdata(arizona->dev);
 
 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -364,7 +985,7 @@
 	mutex_init(&info->lock);
 	info->arizona = arizona;
 	info->dev = &pdev->dev;
-	info->detecting = true;
+	INIT_DELAYED_WORK(&info->hpdet_work, arizona_hpdet_work);
 	platform_set_drvdata(pdev, info);
 
 	switch (arizona->type) {
@@ -374,6 +995,8 @@
 			info->micd_reva = true;
 			break;
 		default:
+			info->micd_clamp = true;
+			info->hpdet_ip = 1;
 			break;
 		}
 		break;
@@ -416,9 +1039,64 @@
 		}
 	}
 
+	if (arizona->pdata.hpdet_id_gpio > 0) {
+		ret = devm_gpio_request_one(&pdev->dev,
+					    arizona->pdata.hpdet_id_gpio,
+					    GPIOF_OUT_INIT_LOW,
+					    "HPDET");
+		if (ret != 0) {
+			dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
+				arizona->pdata.hpdet_id_gpio, ret);
+			goto err_register;
+		}
+	}
+
+	if (arizona->pdata.micd_bias_start_time)
+		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+				   ARIZONA_MICD_BIAS_STARTTIME_MASK,
+				   arizona->pdata.micd_bias_start_time
+				   << ARIZONA_MICD_BIAS_STARTTIME_SHIFT);
+
+	if (arizona->pdata.micd_rate)
+		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+				   ARIZONA_MICD_RATE_MASK,
+				   arizona->pdata.micd_rate
+				   << ARIZONA_MICD_RATE_SHIFT);
+
+	if (arizona->pdata.micd_dbtime)
+		regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
+				   ARIZONA_MICD_DBTIME_MASK,
+				   arizona->pdata.micd_dbtime
+				   << ARIZONA_MICD_DBTIME_SHIFT);
+
+	/*
+	 * If we have a clamp use it, activating in conjunction with
+	 * GPIO5 if that is connected for jack detect operation.
+	 */
+	if (info->micd_clamp) {
+		if (arizona->pdata.jd_gpio5) {
+			/* Put the GPIO into input mode */
+			regmap_write(arizona->regmap, ARIZONA_GPIO5_CTRL,
+				     0xc101);
+
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_MICD_CLAMP_CONTROL,
+					   ARIZONA_MICD_CLAMP_MODE_MASK, 0x9);
+		} else {
+			regmap_update_bits(arizona->regmap,
+					   ARIZONA_MICD_CLAMP_CONTROL,
+					   ARIZONA_MICD_CLAMP_MODE_MASK, 0x4);
+		}
+
+		regmap_update_bits(arizona->regmap,
+				   ARIZONA_JACK_DETECT_DEBOUNCE,
+				   ARIZONA_MICD_CLAMP_DB,
+				   ARIZONA_MICD_CLAMP_DB);
+	}
+
 	arizona_extcon_set_mode(info, 0);
 
-	info->input = input_allocate_device();
+	info->input = devm_input_allocate_device(&pdev->dev);
 	if (!info->input) {
 		dev_err(arizona->dev, "Can't allocate input dev\n");
 		ret = -ENOMEM;
@@ -436,7 +1114,15 @@
 	pm_runtime_idle(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
-	ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_RISE,
+	if (arizona->pdata.jd_gpio5) {
+		jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+		jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+	} else {
+		jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+		jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+	}
+
+	ret = arizona_request_irq(arizona, jack_irq_rise,
 				  "JACKDET rise", arizona_jackdet, info);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to get JACKDET rise IRQ: %d\n",
@@ -444,21 +1130,21 @@
 		goto err_input;
 	}
 
-	ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 1);
+	ret = arizona_set_irq_wake(arizona, jack_irq_rise, 1);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to set JD rise IRQ wake: %d\n",
 			ret);
 		goto err_rise;
 	}
 
-	ret = arizona_request_irq(arizona, ARIZONA_IRQ_JD_FALL,
+	ret = arizona_request_irq(arizona, jack_irq_fall,
 				  "JACKDET fall", arizona_jackdet, info);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to get JD fall IRQ: %d\n", ret);
 		goto err_rise_wake;
 	}
 
-	ret = arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 1);
+	ret = arizona_set_irq_wake(arizona, jack_irq_fall, 1);
 	if (ret != 0) {
 		dev_err(&pdev->dev, "Failed to set JD fall IRQ wake: %d\n",
 			ret);
@@ -472,11 +1158,12 @@
 		goto err_fall_wake;
 	}
 
-	regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
-			   ARIZONA_MICD_BIAS_STARTTIME_MASK |
-			   ARIZONA_MICD_RATE_MASK,
-			   7 << ARIZONA_MICD_BIAS_STARTTIME_SHIFT |
-			   8 << ARIZONA_MICD_RATE_SHIFT);
+	ret = arizona_request_irq(arizona, ARIZONA_IRQ_HPDET,
+				  "HPDET", arizona_hpdet_irq, info);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to get HPDET IRQ: %d\n", ret);
+		goto err_micdet;
+	}
 
 	arizona_clk32k_enable(arizona);
 	regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_DEBOUNCE,
@@ -494,23 +1181,24 @@
 	ret = input_register_device(info->input);
 	if (ret) {
 		dev_err(&pdev->dev, "Can't register input device: %d\n", ret);
-		goto err_micdet;
+		goto err_hpdet;
 	}
 
 	return 0;
 
+err_hpdet:
+	arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
 err_micdet:
 	arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
 err_fall_wake:
-	arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+	arizona_set_irq_wake(arizona, jack_irq_fall, 0);
 err_fall:
-	arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+	arizona_free_irq(arizona, jack_irq_fall, info);
 err_rise_wake:
-	arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
+	arizona_set_irq_wake(arizona, jack_irq_rise, 0);
 err_rise:
-	arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
+	arizona_free_irq(arizona, jack_irq_rise, info);
 err_input:
-	input_free_device(info->input);
 err_register:
 	pm_runtime_disable(&pdev->dev);
 	extcon_dev_unregister(&info->edev);
@@ -522,18 +1210,32 @@
 {
 	struct arizona_extcon_info *info = platform_get_drvdata(pdev);
 	struct arizona *arizona = info->arizona;
+	int jack_irq_rise, jack_irq_fall;
 
 	pm_runtime_disable(&pdev->dev);
 
-	arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_RISE, 0);
-	arizona_set_irq_wake(arizona, ARIZONA_IRQ_JD_FALL, 0);
+	regmap_update_bits(arizona->regmap,
+			   ARIZONA_MICD_CLAMP_CONTROL,
+			   ARIZONA_MICD_CLAMP_MODE_MASK, 0);
+
+	if (arizona->pdata.jd_gpio5) {
+		jack_irq_rise = ARIZONA_IRQ_MICD_CLAMP_RISE;
+		jack_irq_fall = ARIZONA_IRQ_MICD_CLAMP_FALL;
+	} else {
+		jack_irq_rise = ARIZONA_IRQ_JD_RISE;
+		jack_irq_fall = ARIZONA_IRQ_JD_FALL;
+	}
+
+	arizona_set_irq_wake(arizona, jack_irq_rise, 0);
+	arizona_set_irq_wake(arizona, jack_irq_fall, 0);
+	arizona_free_irq(arizona, ARIZONA_IRQ_HPDET, info);
 	arizona_free_irq(arizona, ARIZONA_IRQ_MICDET, info);
-	arizona_free_irq(arizona, ARIZONA_IRQ_JD_RISE, info);
-	arizona_free_irq(arizona, ARIZONA_IRQ_JD_FALL, info);
+	arizona_free_irq(arizona, jack_irq_rise, info);
+	arizona_free_irq(arizona, jack_irq_fall, info);
+	cancel_delayed_work_sync(&info->hpdet_work);
 	regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE,
 			   ARIZONA_JD1_ENA, 0);
 	arizona_clk32k_disable(arizona);
-	input_unregister_device(info->input);
 	extcon_dev_unregister(&info->edev);
 
 	return 0;
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 1b14bfc..02bec32 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -29,7 +29,7 @@
 #include <linux/workqueue.h>
 #include <linux/gpio.h>
 #include <linux/extcon.h>
-#include <linux/extcon/extcon_gpio.h>
+#include <linux/extcon/extcon-gpio.h>
 
 struct gpio_extcon_data {
 	struct extcon_dev edev;
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 8c17b65..b70e381 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
+#include <linux/input.h>
 #include <linux/interrupt.h>
 #include <linux/err.h>
 #include <linux/platform_device.h>
@@ -29,92 +30,7 @@
 #include <linux/irqdomain.h>
 
 #define	DEV_NAME			"max77693-muic"
-
-/* MAX77693 MUIC - STATUS1~3 Register */
-#define STATUS1_ADC_SHIFT		(0)
-#define STATUS1_ADCLOW_SHIFT		(5)
-#define STATUS1_ADCERR_SHIFT		(6)
-#define STATUS1_ADC1K_SHIFT		(7)
-#define STATUS1_ADC_MASK		(0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK		(0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK		(0x1 << STATUS1_ADCERR_SHIFT)
-#define STATUS1_ADC1K_MASK		(0x1 << STATUS1_ADC1K_SHIFT)
-
-#define STATUS2_CHGTYP_SHIFT		(0)
-#define STATUS2_CHGDETRUN_SHIFT		(3)
-#define STATUS2_DCDTMR_SHIFT		(4)
-#define STATUS2_DXOVP_SHIFT		(5)
-#define STATUS2_VBVOLT_SHIFT		(6)
-#define STATUS2_VIDRM_SHIFT		(7)
-#define STATUS2_CHGTYP_MASK		(0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK		(0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK		(0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DXOVP_MASK		(0x1 << STATUS2_DXOVP_SHIFT)
-#define STATUS2_VBVOLT_MASK		(0x1 << STATUS2_VBVOLT_SHIFT)
-#define STATUS2_VIDRM_MASK		(0x1 << STATUS2_VIDRM_SHIFT)
-
-#define STATUS3_OVP_SHIFT		(2)
-#define STATUS3_OVP_MASK		(0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX77693 CDETCTRL1~2 register */
-#define CDETCTRL1_CHGDETEN_SHIFT	(0)
-#define CDETCTRL1_CHGTYPMAN_SHIFT	(1)
-#define CDETCTRL1_DCDEN_SHIFT		(2)
-#define CDETCTRL1_DCD2SCT_SHIFT		(3)
-#define CDETCTRL1_CDDELAY_SHIFT		(4)
-#define CDETCTRL1_DCDCPL_SHIFT		(5)
-#define CDETCTRL1_CDPDET_SHIFT		(7)
-#define CDETCTRL1_CHGDETEN_MASK		(0x1 << CDETCTRL1_CHGDETEN_SHIFT)
-#define CDETCTRL1_CHGTYPMAN_MASK	(0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
-#define CDETCTRL1_DCDEN_MASK		(0x1 << CDETCTRL1_DCDEN_SHIFT)
-#define CDETCTRL1_DCD2SCT_MASK		(0x1 << CDETCTRL1_DCD2SCT_SHIFT)
-#define CDETCTRL1_CDDELAY_MASK		(0x1 << CDETCTRL1_CDDELAY_SHIFT)
-#define CDETCTRL1_DCDCPL_MASK		(0x1 << CDETCTRL1_DCDCPL_SHIFT)
-#define CDETCTRL1_CDPDET_MASK		(0x1 << CDETCTRL1_CDPDET_SHIFT)
-
-#define CDETCTRL2_VIDRMEN_SHIFT		(1)
-#define CDETCTRL2_DXOVPEN_SHIFT		(3)
-#define CDETCTRL2_VIDRMEN_MASK		(0x1 << CDETCTRL2_VIDRMEN_SHIFT)
-#define CDETCTRL2_DXOVPEN_MASK		(0x1 << CDETCTRL2_DXOVPEN_SHIFT)
-
-/* MAX77693 MUIC - CONTROL1~3 register */
-#define COMN1SW_SHIFT			(0)
-#define COMP2SW_SHIFT			(3)
-#define COMN1SW_MASK			(0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK			(0x7 << COMP2SW_SHIFT)
-#define COMP_SW_MASK			(COMP2SW_MASK | COMN1SW_MASK)
-#define CONTROL1_SW_USB			((1 << COMP2SW_SHIFT) \
-						| (1 << COMN1SW_SHIFT))
-#define CONTROL1_SW_AUDIO		((2 << COMP2SW_SHIFT) \
-						| (2 << COMN1SW_SHIFT))
-#define CONTROL1_SW_UART		((3 << COMP2SW_SHIFT) \
-						| (3 << COMN1SW_SHIFT))
-#define CONTROL1_SW_OPEN		((0 << COMP2SW_SHIFT) \
-						| (0 << COMN1SW_SHIFT))
-
-#define CONTROL2_LOWPWR_SHIFT		(0)
-#define CONTROL2_ADCEN_SHIFT		(1)
-#define CONTROL2_CPEN_SHIFT		(2)
-#define CONTROL2_SFOUTASRT_SHIFT	(3)
-#define CONTROL2_SFOUTORD_SHIFT		(4)
-#define CONTROL2_ACCDET_SHIFT		(5)
-#define CONTROL2_USBCPINT_SHIFT		(6)
-#define CONTROL2_RCPS_SHIFT		(7)
-#define CONTROL2_LOWPWR_MASK		(0x1 << CONTROL2_LOWPWR_SHIFT)
-#define CONTROL2_ADCEN_MASK		(0x1 << CONTROL2_ADCEN_SHIFT)
-#define CONTROL2_CPEN_MASK		(0x1 << CONTROL2_CPEN_SHIFT)
-#define CONTROL2_SFOUTASRT_MASK		(0x1 << CONTROL2_SFOUTASRT_SHIFT)
-#define CONTROL2_SFOUTORD_MASK		(0x1 << CONTROL2_SFOUTORD_SHIFT)
-#define CONTROL2_ACCDET_MASK		(0x1 << CONTROL2_ACCDET_SHIFT)
-#define CONTROL2_USBCPINT_MASK		(0x1 << CONTROL2_USBCPINT_SHIFT)
-#define CONTROL2_RCPS_MASK		(0x1 << CONTROL2_RCPS_SHIFT)
-
-#define CONTROL3_JIGSET_SHIFT		(0)
-#define CONTROL3_BTLDSET_SHIFT		(2)
-#define CONTROL3_ADCDBSET_SHIFT		(4)
-#define CONTROL3_JIGSET_MASK		(0x3 << CONTROL3_JIGSET_SHIFT)
-#define CONTROL3_BTLDSET_MASK		(0x3 << CONTROL3_BTLDSET_SHIFT)
-#define CONTROL3_ADCDBSET_MASK		(0x3 << CONTROL3_ADCDBSET_SHIFT)
+#define	DELAY_MS_DEFAULT		20000		/* unit: millisecond */
 
 enum max77693_muic_adc_debounce_time {
 	ADC_DEBOUNCE_TIME_5MS = 0,
@@ -127,14 +43,40 @@
 	struct device *dev;
 	struct max77693_dev *max77693;
 	struct extcon_dev *edev;
-	int prev_adc;
-	int prev_adc_gnd;
+	int prev_cable_type;
+	int prev_cable_type_gnd;
 	int prev_chg_type;
+	int prev_button_type;
 	u8 status[2];
 
 	int irq;
 	struct work_struct irq_work;
 	struct mutex mutex;
+
+	/*
+	 * Use delayed workqueue to detect cable state and then
+	 * notify cable state to notifiee/platform through uevent.
+	 * After completing the booting of platform, the extcon provider
+	 * driver should notify cable state to upper layer.
+	 */
+	struct delayed_work wq_detcable;
+
+	/* Button of dock device */
+	struct input_dev *dock;
+
+	/*
+	 * Default USB/UART path (either UART/USB or AUX_UART/AUX_USB),
+	 * i.e. the H/W path of COMP2/COMN1 in the CONTROL1 register.
+	 */
+	int path_usb;
+	int path_uart;
+};
+
+enum max77693_muic_cable_group {
+	MAX77693_CABLE_GROUP_ADC = 0,
+	MAX77693_CABLE_GROUP_ADC_GND,
+	MAX77693_CABLE_GROUP_CHG,
+	MAX77693_CABLE_GROUP_VBVOLT,
 };
 
 enum max77693_muic_charger_type {
@@ -215,27 +157,59 @@
 
 	/* The accessories below have the same ADC value, so the ADCLow and
 	   ADC1K bits are used to tell specific accessories apart */
-	MAX77693_MUIC_GND_USB_OTG = 0x100,	/* ADC:0x0, ADCLow:0, ADC1K:0 */
-	MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, ADCLow:1, ADC1K:0 */
-	MAX77693_MUIC_GND_MHL_CABLE = 0x103,	/* ADC:0x0, ADCLow:1, ADC1K:1 */
+	MAX77693_MUIC_GND_USB_OTG = 0x100,	/* ADC:0x0, VBVolt:0, ADCLow:0, ADC1K:0 */
+	MAX77693_MUIC_GND_USB_OTG_VB = 0x104,	/* ADC:0x0, VBVolt:1, ADCLow:0, ADC1K:0 */
+	MAX77693_MUIC_GND_AV_CABLE_LOAD = 0x102,/* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:0 */
+	MAX77693_MUIC_GND_MHL = 0x103,		/* ADC:0x0, VBVolt:0, ADCLow:1, ADC1K:1 */
+	MAX77693_MUIC_GND_MHL_VB = 0x107,	/* ADC:0x0, VBVolt:1, ADCLow:1, ADC1K:1 */
 };
 
 /* MAX77693 MUIC device support below list of accessories(external connector) */
-const char *max77693_extcon_cable[] = {
-	[0] = "USB",
-	[1] = "USB-Host",
-	[2] = "TA",
-	[3] = "Fast-charger",
-	[4] = "Slow-charger",
-	[5] = "Charge-downstream",
-	[6] = "MHL",
-	[7] = "Audio-video-load",
-	[8] = "Audio-video-noload",
-	[9] = "JIG",
+enum {
+	EXTCON_CABLE_USB = 0,
+	EXTCON_CABLE_USB_HOST,
+	EXTCON_CABLE_TA,
+	EXTCON_CABLE_FAST_CHARGER,
+	EXTCON_CABLE_SLOW_CHARGER,
+	EXTCON_CABLE_CHARGE_DOWNSTREAM,
+	EXTCON_CABLE_MHL,
+	EXTCON_CABLE_MHL_TA,
+	EXTCON_CABLE_JIG_USB_ON,
+	EXTCON_CABLE_JIG_USB_OFF,
+	EXTCON_CABLE_JIG_UART_OFF,
+	EXTCON_CABLE_JIG_UART_ON,
+	EXTCON_CABLE_DOCK_SMART,
+	EXTCON_CABLE_DOCK_DESK,
+	EXTCON_CABLE_DOCK_AUDIO,
+
+	_EXTCON_CABLE_NUM,
+};
+
+static const char *max77693_extcon_cable[] = {
+	[EXTCON_CABLE_USB]			= "USB",
+	[EXTCON_CABLE_USB_HOST]			= "USB-Host",
+	[EXTCON_CABLE_TA]			= "TA",
+	[EXTCON_CABLE_FAST_CHARGER]		= "Fast-charger",
+	[EXTCON_CABLE_SLOW_CHARGER]		= "Slow-charger",
+	[EXTCON_CABLE_CHARGE_DOWNSTREAM]	= "Charge-downstream",
+	[EXTCON_CABLE_MHL]			= "MHL",
+	[EXTCON_CABLE_MHL_TA]			= "MHL_TA",
+	[EXTCON_CABLE_JIG_USB_ON]		= "JIG-USB-ON",
+	[EXTCON_CABLE_JIG_USB_OFF]		= "JIG-USB-OFF",
+	[EXTCON_CABLE_JIG_UART_OFF]		= "JIG-UART-OFF",
+	[EXTCON_CABLE_JIG_UART_ON]		= "Dock-Car",
+	[EXTCON_CABLE_DOCK_SMART]		= "Dock-Smart",
+	[EXTCON_CABLE_DOCK_DESK]		= "Dock-Desk",
+	[EXTCON_CABLE_DOCK_AUDIO]		= "Dock-Audio",
 
 	NULL,
 };
 
+/*
+ * max77693_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max77693 MUIC
+ * @time: the debounce time of ADC
+ */
 static int max77693_muic_set_debounce_time(struct max77693_muic_info *info,
 		enum max77693_muic_adc_debounce_time time)
 {
@@ -250,18 +224,29 @@
 					  MAX77693_MUIC_REG_CTRL3,
 					  time << CONTROL3_ADCDBSET_SHIFT,
 					  CONTROL3_ADCDBSET_MASK);
-		if (ret)
+		if (ret) {
 			dev_err(info->dev, "failed to set ADC debounce time\n");
+			return -EAGAIN;
+		}
 		break;
 	default:
 		dev_err(info->dev, "invalid ADC debounce time\n");
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
 
-	return ret;
+	return 0;
 };
 
+/*
+ * max77693_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max77693 MUIC
+ * @val: the H/W line path for the attached cable
+ * @attached: the state of cable (true:attached, false:detached)
+ *
+ * The max77693 MUIC device shares its external H/W lines among a variety of
+ * cables, so this function sets the internal H/W line path according to the
+ * type of attached cable.
+ */
 static int max77693_muic_set_path(struct max77693_muic_info *info,
 		u8 val, bool attached)
 {
@@ -277,7 +262,7 @@
 			MAX77693_MUIC_REG_CTRL1, ctrl1, COMP_SW_MASK);
 	if (ret < 0) {
 		dev_err(info->dev, "failed to update MUIC register\n");
-		goto out;
+		return -EAGAIN;
 	}
 
 	if (attached)
@@ -290,141 +275,457 @@
 			CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
 	if (ret < 0) {
 		dev_err(info->dev, "failed to update MUIC register\n");
-		goto out;
+		return -EAGAIN;
 	}
 
 	dev_info(info->dev,
 		"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
 		ctrl1, ctrl2, attached ? "attached" : "detached");
-out:
-	return ret;
+
+	return 0;
 }
 
-static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info,
-		bool attached)
+/*
+ * max77693_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max77693 MUIC
+ * @group: the cable group to check
+ * @attached: used to return whether the cable is attached or detached
+ *
+ * This function checks whether the cable is attached or detached, and then
+ * determines the precise cable type according to the cable group.
+ *	- MAX77693_CABLE_GROUP_ADC
+ *	- MAX77693_CABLE_GROUP_ADC_GND
+ *	- MAX77693_CABLE_GROUP_CHG
+ *	- MAX77693_CABLE_GROUP_VBVOLT
+ */
+static int max77693_muic_get_cable_type(struct max77693_muic_info *info,
+		enum max77693_muic_cable_group group, bool *attached)
 {
-	int ret = 0;
-	int type;
-	int adc, adc1k, adclow;
+	int cable_type = 0;
+	int adc;
+	int adc1k;
+	int adclow;
+	int vbvolt;
+	int chg_type;
 
-	if (attached) {
-		adc = info->status[0] & STATUS1_ADC_MASK;
-		adclow = info->status[0] & STATUS1_ADCLOW_MASK;
-		adclow >>= STATUS1_ADCLOW_SHIFT;
-		adc1k = info->status[0] & STATUS1_ADC1K_MASK;
-		adc1k >>= STATUS1_ADC1K_SHIFT;
-
-		/**
-		 * [0x1][ADCLow][ADC1K]
-		 * [0x1    0       0  ]	: USB_OTG
-		 * [0x1    1       0  ] : Audio Video Cable with load
-		 * [0x1    1       1  ] : MHL
+	switch (group) {
+	case MAX77693_CABLE_GROUP_ADC:
+		/*
+		 * Read ADC value to check cable type and decide cable state
+		 * according to cable type
 		 */
-		type = ((0x1 << 8) | (adclow << 1) | adc1k);
+		adc = info->status[0] & STATUS1_ADC_MASK;
+		adc >>= STATUS1_ADC_SHIFT;
 
-		/* Store previous ADC value to handle accessory
-		   when accessory will be detached */
-		info->prev_adc = adc;
-		info->prev_adc_gnd = type;
-	} else
-		type = info->prev_adc_gnd;
+		/*
+		 * Check the current cable state/type and store the cable type
+		 * (info->prev_cable_type) so the cable can be handled when it
+		 * is detached.
+		 */
+		if (adc == MAX77693_MUIC_ADC_OPEN) {
+			*attached = false;
 
-	switch (type) {
-	case MAX77693_MUIC_GND_USB_OTG:
-		/* USB_OTG */
-		ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
-		if (ret < 0)
-			goto out;
-		extcon_set_cable_state(info->edev, "USB-Host", attached);
+			cable_type = info->prev_cable_type;
+			info->prev_cable_type = MAX77693_MUIC_ADC_OPEN;
+		} else {
+			*attached = true;
+
+			cable_type = info->prev_cable_type = adc;
+		}
 		break;
-	case MAX77693_MUIC_GND_AV_CABLE_LOAD:
-		/* Audio Video Cable with load */
-		ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
-		if (ret < 0)
-			goto out;
-		extcon_set_cable_state(info->edev,
-				"Audio-video-load", attached);
+	case MAX77693_CABLE_GROUP_ADC_GND:
+		/*
+		 * Read ADC value to check cable type and decide cable state
+		 * according to cable type
+		 */
+		adc = info->status[0] & STATUS1_ADC_MASK;
+		adc >>= STATUS1_ADC_SHIFT;
+
+		/*
+		 * Check the current cable state/type and store the cable type
+		 * (info->prev_cable_type/_gnd) so the cable can be handled
+		 * when it is detached.
+		 */
+		if (adc == MAX77693_MUIC_ADC_OPEN) {
+			*attached = false;
+
+			cable_type = info->prev_cable_type_gnd;
+			info->prev_cable_type_gnd = MAX77693_MUIC_ADC_OPEN;
+		} else {
+			*attached = true;
+
+			adclow = info->status[0] & STATUS1_ADCLOW_MASK;
+			adclow >>= STATUS1_ADCLOW_SHIFT;
+			adc1k = info->status[0] & STATUS1_ADC1K_MASK;
+			adc1k >>= STATUS1_ADC1K_SHIFT;
+
+			vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+			vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+			/**
+			 * [0x1][VBVolt][ADCLow][ADC1K]
+			 * [0x1    0	   0       0  ]	: USB_OTG
+			 * [0x1    1	   0       0  ]	: USB_OTG_VB
+			 * [0x1    0       1       0  ] : Audio Video Cable with load
+			 * [0x1    0       1       1  ] : MHL without charging connector
+			 * [0x1    1       1       1  ] : MHL with charging connector
+			 */
+			cable_type = ((0x1 << 8)
+					| (vbvolt << 2)
+					| (adclow << 1)
+					| adc1k);
+
+			info->prev_cable_type = adc;
+			info->prev_cable_type_gnd = cable_type;
+		}
+
 		break;
-	case MAX77693_MUIC_GND_MHL_CABLE:
-		/* MHL */
-		extcon_set_cable_state(info->edev, "MHL", attached);
+	case MAX77693_CABLE_GROUP_CHG:
+		/*
+		 * Read charger type to check cable type and decide cable state
+		 * according to type of charger cable.
+		 */
+		chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+		chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+		if (chg_type == MAX77693_CHARGER_TYPE_NONE) {
+			*attached = false;
+
+			cable_type = info->prev_chg_type;
+			info->prev_chg_type = MAX77693_CHARGER_TYPE_NONE;
+		} else {
+			*attached = true;
+
+			/*
+			 * Check the current cable state/type and store the cable
+			 * type (info->prev_chg_type) so the cable can be handled
+			 * when the charger cable is detached.
+			 */
+			cable_type = info->prev_chg_type = chg_type;
+		}
+
+		break;
+	case MAX77693_CABLE_GROUP_VBVOLT:
+		/*
+		 * Read ADC value to check cable type and decide cable state
+		 * according to cable type
+		 */
+		adc = info->status[0] & STATUS1_ADC_MASK;
+		adc >>= STATUS1_ADC_SHIFT;
+		chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+		chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+		if (adc == MAX77693_MUIC_ADC_OPEN
+				&& chg_type == MAX77693_CHARGER_TYPE_NONE)
+			*attached = false;
+		else
+			*attached = true;
+
+		/*
+		 * Read the vbvolt field; if vbvolt is 1,
+		 * this cable is used for charging.
+		 */
+		vbvolt = info->status[1] & STATUS2_VBVOLT_MASK;
+		vbvolt >>= STATUS2_VBVOLT_SHIFT;
+
+		cable_type = vbvolt;
 		break;
 	default:
-		dev_err(info->dev, "failed to detect %s accessory\n",
-			attached ? "attached" : "detached");
-		dev_err(info->dev, "- adc:0x%x, adclow:0x%x, adc1k:0x%x\n",
-			adc, adclow, adc1k);
-		ret = -EINVAL;
+		dev_err(info->dev, "Unknown cable group (%d)\n", group);
+		cable_type = -EINVAL;
 		break;
 	}
 
-out:
-	return ret;
+	return cable_type;
 }
 
-static int max77693_muic_adc_handler(struct max77693_muic_info *info,
-		int curr_adc, bool attached)
+static int max77693_muic_dock_handler(struct max77693_muic_info *info,
+		int cable_type, bool attached)
 {
 	int ret = 0;
-	int adc;
+	int vbvolt;
+	bool cable_attached;
+	char dock_name[CABLE_NAME_MAX];
 
-	if (attached) {
-		/* Store ADC value to handle accessory
-		   when accessory will be detached */
-		info->prev_adc = curr_adc;
-		adc = curr_adc;
-	} else
-		adc = info->prev_adc;
+	dev_info(info->dev,
+		"external connector is %s (adc:0x%02x)\n",
+		attached ? "attached" : "detached", cable_type);
+
+	switch (cable_type) {
+	case MAX77693_MUIC_ADC_RESERVED_ACC_3:		/* Dock-Smart */
+		/*
+		 * Check whether the power cable is attached or detached.
+		 * The Dock-Smart device always needs an external power supply;
+		 * if no power cable (USB/TA) is connected to the dock, the
+		 * user can't use Dock-Smart in desktop mode.
+		 */
+		vbvolt = max77693_muic_get_cable_type(info,
+				MAX77693_CABLE_GROUP_VBVOLT, &cable_attached);
+		if (attached && !vbvolt) {
+			dev_warn(info->dev,
+				"Cannot detect external power supply\n");
+			return 0;
+		}
+
+		/*
+		 * Notify Dock-Smart/MHL state.
+		 * - The Dock-Smart device provides three kinds of connection:
+		 * HDMI, USB for mouse/keyboard and a micro-USB port for a
+		 * USB/TA cable. The Dock-Smart device always needs an external
+		 * power supply (USB/TA cable through the micro-USB port) and
+		 * supports screen output from the target to a separate monitor
+		 * plus mouse/keyboard for desktop mode.
+		 *
+		 * Features of 'USB/TA cable with Dock-Smart device'
+		 * - Support MHL
+		 * - Support external output feature of audio
+		 * - Support charging through micro-usb port without data
+		 *	     connection if TA cable is connected to target.
+		 * - Support charging and data connection through micro-usb port
+		 *           if USB cable is connected between target and host
+		 *	     device.
+		 * - Support OTG device (Mouse/Keyboard)
+		 */
+		ret = max77693_muic_set_path(info, info->path_usb, attached);
+		if (ret < 0)
+			return ret;
+
+		extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+		extcon_set_cable_state(info->edev, "MHL", attached);
+		goto out;
+	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:	/* Dock-Car */
+		strcpy(dock_name, "Dock-Car");
+		break;
+	case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:	/* Dock-Desk */
+		strcpy(dock_name, "Dock-Desk");
+		break;
+	case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:		/* Dock-Audio */
+		strcpy(dock_name, "Dock-Audio");
+		if (!attached)
+			extcon_set_cable_state(info->edev, "USB", false);
+		break;
+	default:
+		dev_err(info->dev, "failed to detect %s dock device\n",
+			attached ? "attached" : "detached");
+		return -EINVAL;
+	}
+
+	/* Dock-Car/Desk/Audio, PATH:AUDIO */
+	ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+	if (ret < 0)
+		return ret;
+	extcon_set_cable_state(info->edev, dock_name, attached);
+
+out:
+	return 0;
+}
+
+static int max77693_muic_dock_button_handler(struct max77693_muic_info *info,
+		int button_type, bool attached)
+{
+	struct input_dev *dock = info->dock;
+	unsigned int code;
+
+	switch (button_type) {
+	case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON-1
+		... MAX77693_MUIC_ADC_REMOTE_S3_BUTTON+1:
+		/* DOCK_KEY_PREV */
+		code = KEY_PREVIOUSSONG;
+		break;
+	case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON-1
+		... MAX77693_MUIC_ADC_REMOTE_S7_BUTTON+1:
+		/* DOCK_KEY_NEXT */
+		code = KEY_NEXTSONG;
+		break;
+	case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
+		/* DOCK_VOL_DOWN */
+		code = KEY_VOLUMEDOWN;
+		break;
+	case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
+		/* DOCK_VOL_UP */
+		code = KEY_VOLUMEUP;
+		break;
+	case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON-1
+		... MAX77693_MUIC_ADC_REMOTE_S12_BUTTON+1:
+		/* DOCK_KEY_PLAY_PAUSE */
+		code = KEY_PLAYPAUSE;
+		break;
+	default:
+		dev_err(info->dev,
+			"failed to detect %s key (adc:0x%x)\n",
+			attached ? "pressed" : "released", button_type);
+		return -EINVAL;
+	}
+
+	input_event(dock, EV_KEY, code, attached);
+	input_sync(dock);
+
+	return 0;
+}
+
+static int max77693_muic_adc_ground_handler(struct max77693_muic_info *info)
+{
+	int cable_type_gnd;
+	int ret = 0;
+	bool attached;
+
+	cable_type_gnd = max77693_muic_get_cable_type(info,
+				MAX77693_CABLE_GROUP_ADC_GND, &attached);
+
+	switch (cable_type_gnd) {
+	case MAX77693_MUIC_GND_USB_OTG:
+	case MAX77693_MUIC_GND_USB_OTG_VB:
+		/* USB_OTG, PATH: AP_USB */
+		ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
+		if (ret < 0)
+			return ret;
+		extcon_set_cable_state(info->edev, "USB-Host", attached);
+		break;
+	case MAX77693_MUIC_GND_AV_CABLE_LOAD:
+		/* Audio Video Cable with load, PATH:AUDIO */
+		ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+		if (ret < 0)
+			return ret;
+		extcon_set_cable_state(info->edev,
+				"Audio-video-load", attached);
+		break;
+	case MAX77693_MUIC_GND_MHL:
+	case MAX77693_MUIC_GND_MHL_VB:
+		/* MHL or MHL with USB/TA cable */
+		extcon_set_cable_state(info->edev, "MHL", attached);
+		break;
+	default:
+		dev_err(info->dev, "failed to detect %s cable of gnd type\n",
+			attached ? "attached" : "detached");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int max77693_muic_jig_handler(struct max77693_muic_info *info,
+		int cable_type, bool attached)
+{
+	char cable_name[32];
+	int ret = 0;
+	u8 path = CONTROL1_SW_OPEN;
+
+	dev_info(info->dev,
+		"external connector is %s (adc:0x%02x)\n",
+		attached ? "attached" : "detached", cable_type);
+
+	switch (cable_type) {
+	case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:	/* ADC_JIG_USB_OFF */
+		/* PATH:AP_USB */
+		strcpy(cable_name, "JIG-USB-OFF");
+		path = CONTROL1_SW_USB;
+		break;
+	case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:	/* ADC_JIG_USB_ON */
+		/* PATH:AP_USB */
+		strcpy(cable_name, "JIG-USB-ON");
+		path = CONTROL1_SW_USB;
+		break;
+	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:	/* ADC_JIG_UART_OFF */
+		/* PATH:AP_UART */
+		strcpy(cable_name, "JIG-UART-OFF");
+		path = CONTROL1_SW_UART;
+		break;
+	default:
+		dev_err(info->dev, "failed to detect %s jig cable\n",
+			attached ? "attached" : "detached");
+		return -EINVAL;
+	}
+
+	ret = max77693_muic_set_path(info, path, attached);
+	if (ret < 0)
+		return ret;
+
+	extcon_set_cable_state(info->edev, cable_name, attached);
+
+	return 0;
+}
+
+static int max77693_muic_adc_handler(struct max77693_muic_info *info)
+{
+	int cable_type;
+	int button_type;
+	bool attached;
+	int ret = 0;
+
+	/* Check accessory state which is either detached or attached */
+	cable_type = max77693_muic_get_cable_type(info,
+				MAX77693_CABLE_GROUP_ADC, &attached);
 
 	dev_info(info->dev,
 		"external connector is %s (adc:0x%02x, prev_adc:0x%x)\n",
-		attached ? "attached" : "detached", curr_adc, info->prev_adc);
+		attached ? "attached" : "detached", cable_type,
+		info->prev_cable_type);
 
-	switch (adc) {
+	switch (cable_type) {
 	case MAX77693_MUIC_ADC_GROUND:
 		/* USB_OTG/MHL/Audio */
-		max77693_muic_adc_ground_handler(info, attached);
+		max77693_muic_adc_ground_handler(info);
 		break;
 	case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
 	case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
-		/* USB */
-		ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
-		if (ret < 0)
-			goto out;
-		extcon_set_cable_state(info->edev, "USB", attached);
-		break;
 	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
-	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
 		/* JIG */
-		ret = max77693_muic_set_path(info, CONTROL1_SW_UART, attached);
+		ret = max77693_muic_jig_handler(info, cable_type, attached);
 		if (ret < 0)
-			goto out;
-		extcon_set_cable_state(info->edev, "JIG", attached);
+			return ret;
 		break;
-	case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:
-		/* Audio Video cable with no-load */
-		ret = max77693_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
+	case MAX77693_MUIC_ADC_RESERVED_ACC_3:		/* Dock-Smart */
+	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:	/* Dock-Car */
+	case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:	/* Dock-Desk */
+	case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:		/* Dock-Audio */
+		/*
+		 * DOCK device
+		 *
+		 * The MAX77693 MUIC device can detect 34 cable types in total,
+		 * excluding charger cables, and does not define a specific role
+		 * for cables with ADC values in the range 0x01 to 0x12.  Cables
+		 * in that range can therefore be used/defined freely according
+		 * to the schematic of the hardware board.
+		 */
+		ret = max77693_muic_dock_handler(info, cable_type, attached);
 		if (ret < 0)
-			goto out;
-		extcon_set_cable_state(info->edev,
-				"Audio-video-noload", attached);
+			return ret;
+		break;
+	case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON:	/* DOCK_KEY_PREV */
+	case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON:	/* DOCK_KEY_NEXT */
+	case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:	/* DOCK_VOL_DOWN */
+	case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:	/* DOCK_VOL_UP */
+	case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON:	/* DOCK_KEY_PLAY_PAUSE */
+		/*
+		 * Button of DOCK device
+		 * - the Prev/Next/Volume Up/Volume Down/Play-Pause button
+		 *
+		 * The MAX77693 MUIC device can detect 34 cable types in total,
+		 * excluding charger cables, and does not define a specific role
+		 * for cables with ADC values in the range 0x01 to 0x12.  Cables
+		 * in that range can therefore be used/defined freely according
+		 * to the schematic of the hardware board.
+		 */
+		if (attached)
+			button_type = info->prev_button_type = cable_type;
+		else
+			button_type = info->prev_button_type;
+
+		ret = max77693_muic_dock_button_handler(info, button_type,
+							attached);
+		if (ret < 0)
+			return ret;
 		break;
 	case MAX77693_MUIC_ADC_SEND_END_BUTTON:
 	case MAX77693_MUIC_ADC_REMOTE_S1_BUTTON:
 	case MAX77693_MUIC_ADC_REMOTE_S2_BUTTON:
-	case MAX77693_MUIC_ADC_REMOTE_S3_BUTTON:
 	case MAX77693_MUIC_ADC_REMOTE_S4_BUTTON:
 	case MAX77693_MUIC_ADC_REMOTE_S5_BUTTON:
 	case MAX77693_MUIC_ADC_REMOTE_S6_BUTTON:
-	case MAX77693_MUIC_ADC_REMOTE_S7_BUTTON:
 	case MAX77693_MUIC_ADC_REMOTE_S8_BUTTON:
-	case MAX77693_MUIC_ADC_REMOTE_S9_BUTTON:
-	case MAX77693_MUIC_ADC_REMOTE_S10_BUTTON:
 	case MAX77693_MUIC_ADC_REMOTE_S11_BUTTON:
-	case MAX77693_MUIC_ADC_REMOTE_S12_BUTTON:
 	case MAX77693_MUIC_ADC_RESERVED_ACC_1:
 	case MAX77693_MUIC_ADC_RESERVED_ACC_2:
-	case MAX77693_MUIC_ADC_RESERVED_ACC_3:
 	case MAX77693_MUIC_ADC_RESERVED_ACC_4:
 	case MAX77693_MUIC_ADC_RESERVED_ACC_5:
 	case MAX77693_MUIC_ADC_CEA936_AUDIO:
@@ -432,60 +733,164 @@
 	case MAX77693_MUIC_ADC_TTY_CONVERTER:
 	case MAX77693_MUIC_ADC_UART_CABLE:
 	case MAX77693_MUIC_ADC_CEA936A_TYPE1_CHG:
-	case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:
 	case MAX77693_MUIC_ADC_CEA936A_TYPE2_CHG:
-		/* This accessory isn't used in general case if it is specially
-		   needed to detect additional accessory, should implement
-		   proper operation when this accessory is attached/detached. */
+		/*
+		 * This accessory isn't used in the general case.  If detection
+		 * of an additional accessory is specifically needed, implement
+		 * the proper operation for when it is attached/detached.
+		 */
 		dev_info(info->dev,
 			"accessory is %s but it isn't used (adc:0x%x)\n",
-			attached ? "attached" : "detached", adc);
-		goto out;
+			attached ? "attached" : "detached", cable_type);
+		return -EAGAIN;
 	default:
 		dev_err(info->dev,
 			"failed to detect %s accessory (adc:0x%x)\n",
-			attached ? "attached" : "detached", adc);
-		ret = -EINVAL;
-		goto out;
+			attached ? "attached" : "detached", cable_type);
+		return -EINVAL;
 	}
 
-out:
-	return ret;
+	return 0;
 }
 
-static int max77693_muic_chg_handler(struct max77693_muic_info *info,
-		int curr_chg_type, bool attached)
+static int max77693_muic_chg_handler(struct max77693_muic_info *info)
 {
-	int ret = 0;
 	int chg_type;
+	int cable_type_gnd;
+	int cable_type;
+	bool attached;
+	bool cable_attached;
+	int ret = 0;
 
-	if (attached) {
-		/* Store previous charger type to control
-		   when charger accessory will be detached */
-		info->prev_chg_type = curr_chg_type;
-		chg_type = curr_chg_type;
-	} else
-		chg_type = info->prev_chg_type;
+	chg_type = max77693_muic_get_cable_type(info,
+				MAX77693_CABLE_GROUP_CHG, &attached);
 
 	dev_info(info->dev,
 		"external connector is %s(chg_type:0x%x, prev_chg_type:0x%x)\n",
 			attached ? "attached" : "detached",
-			curr_chg_type, info->prev_chg_type);
+			chg_type, info->prev_chg_type);
 
 	switch (chg_type) {
 	case MAX77693_CHARGER_TYPE_USB:
-		ret = max77693_muic_set_path(info, CONTROL1_SW_USB, attached);
-		if (ret < 0)
-			goto out;
-		extcon_set_cable_state(info->edev, "USB", attached);
+	case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+	case MAX77693_CHARGER_TYPE_NONE:
+		/* Check MAX77693_CABLE_GROUP_ADC_GND type */
+		cable_type_gnd = max77693_muic_get_cable_type(info,
+					MAX77693_CABLE_GROUP_ADC_GND,
+					&cable_attached);
+		switch (cable_type_gnd) {
+		case MAX77693_MUIC_GND_MHL:
+		case MAX77693_MUIC_GND_MHL_VB:
+			/*
+			 * MHL cable with MHL_TA(USB/TA) cable
+			 * - An MHL cable includes two ports (an HDMI line and a
+			 * separate micro-USB port). When the target connects an MHL
+			 * cable, the extcon driver checks whether an MHL_TA (USB/TA)
+			 * cable is connected; if so, it notifies the state to the
+			 * notifiee so the battery can be charged.
+			 *
+			 * Features of 'MHL_TA(USB/TA) with MHL cable'
+			 * - Support MHL
+			 * - Support charging through micro-usb port without data connection
+			 */
+			extcon_set_cable_state(info->edev, "MHL_TA", attached);
+			if (!cable_attached)
+				extcon_set_cable_state(info->edev, "MHL", cable_attached);
+			break;
+		}
+
+		/* Check MAX77693_CABLE_GROUP_ADC type */
+		cable_type = max77693_muic_get_cable_type(info,
+					MAX77693_CABLE_GROUP_ADC,
+					&cable_attached);
+		switch (cable_type) {
+		case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD:		/* Dock-Audio */
+			/*
+			 * Dock-Audio device with USB/TA cable
+			 * - The dock device includes two ports (Dock-Audio and a
+			 * micro-USB port). When the target connects a Dock-Audio
+			 * device, the extcon driver checks whether a USB/TA cable is
+			 * connected; if so, it notifies the state to the notifiee so
+			 * the battery can be charged.
+			 *
+			 * Features of 'USB/TA cable with Dock-Audio device'
+			 * - Support external output feature of audio.
+			 * - Support charging through micro-usb port without data
+			 *           connection.
+			 */
+			extcon_set_cable_state(info->edev, "USB", attached);
+
+			if (!cable_attached)
+				extcon_set_cable_state(info->edev, "Dock-Audio", cable_attached);
+			break;
+		case MAX77693_MUIC_ADC_RESERVED_ACC_3:		/* Dock-Smart */
+			/*
+			 * Dock-Smart device with USB/TA cable
+			 * - The Dock-Smart device includes three connections: HDMI,
+			 * USB for a mouse/keyboard and a micro-USB port for a USB/TA
+			 * cable. The Dock-Smart device always needs an external
+			 * power supply (USB/TA cable through the micro-USB port).
+			 * It supports screen output of the target to a separate
+			 * monitor plus a mouse/keyboard for desktop mode.
+			 *
+			 * Features of 'USB/TA cable with Dock-Smart device'
+			 * - Support MHL
+			 * - Support external output feature of audio
+			 * - Support charging through micro-usb port without data
+			 *	     connection if TA cable is connected to target.
+			 * - Support charging and data connection through micro-usb port
+			 *           if USB cable is connected between target and host
+			 *	     device.
+			 * - Support OTG device (Mouse/Keyboard)
+			 */
+			ret = max77693_muic_set_path(info, info->path_usb, attached);
+			if (ret < 0)
+				return ret;
+
+			extcon_set_cable_state(info->edev, "Dock-Smart", attached);
+			extcon_set_cable_state(info->edev, "MHL", attached);
+
+			break;
+		}
+
+		/* Check MAX77693_CABLE_GROUP_CHG type */
+		switch (chg_type) {
+		case MAX77693_CHARGER_TYPE_NONE:
+			/*
+			 * When MHL (with a USB/TA cable) or Dock-Audio with a USB/TA
+			 * cable is attached, the MUIC device raises two interrupts:
+			 * - 'MAX77693_MUIC_IRQ_INT1_ADC' for detecting MHL/Dock-Audio.
+			 * - 'MAX77693_MUIC_IRQ_INT2_CHGTYP' for detecting the USB/TA
+			 *   cable connected to MHL or Dock-Audio.
+			 * The MAX77693_MUIC_IRQ_INT1_ADC interrupt always occurs
+			 * before the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt.
+			 *
+			 * If the user attaches MHL (with a USB/TA cable) and detaches
+			 * it again before the MAX77693_MUIC_IRQ_INT2_CHGTYP interrupt
+			 * arrives, the USB/TA cable would remain marked as connected
+			 * even though it is no longer present. To avoid this, the
+			 * driver must handle this case even though the previous
+			 * charger type is N/A.
+			 */
+			break;
+		case MAX77693_CHARGER_TYPE_USB:
+			/* Only USB cable, PATH:AP_USB */
+			ret = max77693_muic_set_path(info, info->path_usb, attached);
+			if (ret < 0)
+				return ret;
+
+			extcon_set_cable_state(info->edev, "USB", attached);
+			break;
+		case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
+			/* Only TA cable */
+			extcon_set_cable_state(info->edev, "TA", attached);
+			break;
+		}
 		break;
 	case MAX77693_CHARGER_TYPE_DOWNSTREAM_PORT:
 		extcon_set_cable_state(info->edev,
 				"Charge-downstream", attached);
 		break;
-	case MAX77693_CHARGER_TYPE_DEDICATED_CHG:
-		extcon_set_cable_state(info->edev, "TA", attached);
-		break;
 	case MAX77693_CHARGER_TYPE_APPLE_500MA:
 		extcon_set_cable_state(info->edev, "Slow-charger", attached);
 		break;
@@ -498,22 +903,18 @@
 		dev_err(info->dev,
 			"failed to detect %s accessory (chg_type:0x%x)\n",
 			attached ? "attached" : "detached", chg_type);
-		ret = -EINVAL;
-		goto out;
+		return -EINVAL;
 	}
 
-out:
-	return ret;
+	return 0;
 }
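
The handler above only publishes cable names ("USB", "TA", "MHL_TA", "Dock-Audio", ...) through extcon; a consumer such as a charger driver picks them up through the extcon notifier interface. Below is a minimal consumer sketch; the extcon device name "max77693-muic" and the listener itself are assumptions for illustration, not part of this patch.

/* Hypothetical extcon consumer: react to "TA" cable state changes
 * reported by the MUIC driver above. Error handling trimmed. */
#include <linux/extcon.h>
#include <linux/notifier.h>

static struct extcon_specific_cable_nb ta_cable_obj;

static int ta_cable_event(struct notifier_block *nb,
			  unsigned long attached, void *data)
{
	/* 'attached' is non-zero when the "TA" cable is reported attached */
	pr_info("TA charger %s\n", attached ? "attached" : "detached");
	return NOTIFY_OK;
}

static struct notifier_block ta_cable_nb = {
	.notifier_call = ta_cable_event,
};

static int ta_listener_init(void)
{
	/* "max77693-muic" is the assumed extcon device name */
	return extcon_register_interest(&ta_cable_obj, "max77693-muic",
					"TA", &ta_cable_nb);
}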
 
 static void max77693_muic_irq_work(struct work_struct *work)
 {
 	struct max77693_muic_info *info = container_of(work,
 			struct max77693_muic_info, irq_work);
-	int curr_adc, curr_chg_type;
 	int irq_type = -1;
 	int i, ret = 0;
-	bool attached = true;
 
 	if (!info->edev)
 		return;
@@ -539,14 +940,7 @@
 	case MAX77693_MUIC_IRQ_INT1_ADC1K:
 		/* Handle all of accessory except for
 		   type of charger accessory */
-		curr_adc = info->status[0] & STATUS1_ADC_MASK;
-		curr_adc >>= STATUS1_ADC_SHIFT;
-
-		/* Check accessory state which is either detached or attached */
-		if (curr_adc == MAX77693_MUIC_ADC_OPEN)
-			attached = false;
-
-		ret = max77693_muic_adc_handler(info, curr_adc, attached);
+		ret = max77693_muic_adc_handler(info);
 		break;
 	case MAX77693_MUIC_IRQ_INT2_CHGTYP:
 	case MAX77693_MUIC_IRQ_INT2_CHGDETREUN:
@@ -555,15 +949,7 @@
 	case MAX77693_MUIC_IRQ_INT2_VBVOLT:
 	case MAX77693_MUIC_IRQ_INT2_VIDRM:
 		/* Handle charger accessory */
-		curr_chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
-		curr_chg_type >>= STATUS2_CHGTYP_SHIFT;
-
-		/* Check charger accessory state which
-		   is either detached or attached */
-		if (curr_chg_type == MAX77693_CHARGER_TYPE_NONE)
-			attached = false;
-
-		ret = max77693_muic_chg_handler(info, curr_chg_type, attached);
+		ret = max77693_muic_chg_handler(info);
 		break;
 	case MAX77693_MUIC_IRQ_INT3_EOC:
 	case MAX77693_MUIC_IRQ_INT3_CGMBC:
@@ -575,7 +961,8 @@
 	default:
 		dev_err(info->dev, "muic interrupt: irq %d occurred\n",
 				irq_type);
-		break;
+		mutex_unlock(&info->mutex);
+		return;
 	}
 
 	if (ret < 0)
@@ -604,7 +991,9 @@
 static int max77693_muic_detect_accessory(struct max77693_muic_info *info)
 {
 	int ret = 0;
-	int adc, chg_type;
+	int adc;
+	int chg_type;
+	bool attached;
 
 	mutex_lock(&info->mutex);
 
@@ -617,35 +1006,39 @@
 		return -EINVAL;
 	}
 
-	adc = info->status[0] & STATUS1_ADC_MASK;
-	adc >>= STATUS1_ADC_SHIFT;
-
-	if (adc != MAX77693_MUIC_ADC_OPEN) {
-		dev_info(info->dev,
-			"external connector is attached (adc:0x%02x)\n", adc);
-
-		ret = max77693_muic_adc_handler(info, adc, true);
-		if (ret < 0)
-			dev_err(info->dev, "failed to detect accessory\n");
-		goto out;
+	adc = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_ADC,
+					&attached);
+	if (attached && adc != MAX77693_MUIC_ADC_OPEN) {
+		ret = max77693_muic_adc_handler(info);
+		if (ret < 0) {
+			dev_err(info->dev, "Cannot detect accessory\n");
+			mutex_unlock(&info->mutex);
+			return ret;
+		}
 	}
 
-	chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
-	chg_type >>= STATUS2_CHGTYP_SHIFT;
-
-	if (chg_type != MAX77693_CHARGER_TYPE_NONE) {
-		dev_info(info->dev,
-			"external connector is attached (chg_type:0x%x)\n",
-			chg_type);
-
-		max77693_muic_chg_handler(info, chg_type, true);
-		if (ret < 0)
-			dev_err(info->dev, "failed to detect charger accessory\n");
+	chg_type = max77693_muic_get_cable_type(info, MAX77693_CABLE_GROUP_CHG,
+					&attached);
+	if (attached && chg_type != MAX77693_CHARGER_TYPE_NONE) {
+		ret = max77693_muic_chg_handler(info);
+		if (ret < 0) {
+			dev_err(info->dev, "Cannot detect charger accessory\n");
+			mutex_unlock(&info->mutex);
+			return ret;
+		}
 	}
 
-out:
 	mutex_unlock(&info->mutex);
-	return ret;
+
+	return 0;
+}
+
+static void max77693_muic_detect_cable_wq(struct work_struct *work)
+{
+	struct max77693_muic_info *info = container_of(to_delayed_work(work),
+				struct max77693_muic_info, wq_detcable);
+
+	max77693_muic_detect_accessory(info);
 }
 
 static int max77693_muic_probe(struct platform_device *pdev)
@@ -654,7 +1047,9 @@
 	struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
 	struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
 	struct max77693_muic_info *info;
-	int ret, i;
+	int delay_jiffies;
+	int ret;
+	int i;
 	u8 id;
 
 	info = devm_kzalloc(&pdev->dev, sizeof(struct max77693_muic_info),
@@ -678,6 +1073,32 @@
 			return ret;
 		}
 	}
+
+	/* Register input device for button of dock device */
+	info->dock = devm_input_allocate_device(&pdev->dev);
+	if (!info->dock) {
+		dev_err(&pdev->dev, "%s: failed to allocate input\n", __func__);
+		return -ENOMEM;
+	}
+	info->dock->name = "max77693-muic/dock";
+	info->dock->phys = "max77693-muic/extcon";
+	info->dock->dev.parent = &pdev->dev;
+
+	__set_bit(EV_REP, info->dock->evbit);
+
+	input_set_capability(info->dock, EV_KEY, KEY_VOLUMEUP);
+	input_set_capability(info->dock, EV_KEY, KEY_VOLUMEDOWN);
+	input_set_capability(info->dock, EV_KEY, KEY_PLAYPAUSE);
+	input_set_capability(info->dock, EV_KEY, KEY_PREVIOUSSONG);
+	input_set_capability(info->dock, EV_KEY, KEY_NEXTSONG);
+
+	ret = input_register_device(info->dock);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Cannot register input device error(%d)\n",
+				ret);
+		return ret;
+	}
+
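
The probe code above only registers the dock input device and declares which keys it can emit; the code that actually translates dock button ADC values into key events is outside this hunk. As a rough sketch (the helper name and its caller are assumptions), reporting a button through this device would look like:

/* Hypothetical helper: forward a dock button event through the input
 * device registered in probe above. */
static void dock_report_button(struct max77693_muic_info *info,
			       unsigned int keycode, bool pressed)
{
	input_event(info->dock, EV_KEY, keycode, pressed);
	input_sync(info->dock);
}

/* e.g. dock_report_button(info, KEY_VOLUMEUP, true) on press and
 * dock_report_button(info, KEY_VOLUMEUP, false) on release. */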
 	platform_set_drvdata(pdev, info);
 	mutex_init(&info->mutex);
 
@@ -697,13 +1118,13 @@
 
 		ret = request_threaded_irq(virq, NULL,
 				max77693_muic_irq_handler,
-				IRQF_ONESHOT, muic_irq->name, info);
+				IRQF_NO_SUSPEND,
+				muic_irq->name, info);
 		if (ret) {
 			dev_err(&pdev->dev,
 				"failed: irq request (IRQ: %d,"
 				" error :%d)\n",
 				muic_irq->irq, ret);
-
 			goto err_irq;
 		}
 	}
@@ -749,23 +1170,54 @@
 				= muic_pdata->init_data[i].data;
 	}
 
+	/*
+	 * Choose the default USB/UART path (either UART/USB or
+	 * AUX_UART/AUX_USB) for the COMP2/COMN1 H/W lines controlled by the
+	 * CONTROL1 register.
+	 */
+	if (muic_pdata->path_uart)
+		info->path_uart = muic_pdata->path_uart;
+	else
+		info->path_uart = CONTROL1_SW_UART;
+
+	if (muic_pdata->path_usb)
+		info->path_usb = muic_pdata->path_usb;
+	else
+		info->path_usb = CONTROL1_SW_USB;
+
+	/* Set initial path for UART */
+	max77693_muic_set_path(info, info->path_uart, true);
+
 	/* Check revision number of MUIC device*/
 	ret = max77693_read_reg(info->max77693->regmap_muic,
 			MAX77693_MUIC_REG_ID, &id);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "failed to read revision number\n");
-		goto err_irq;
+		goto err_extcon;
 	}
 	dev_info(info->dev, "device ID : 0x%x\n", id);
 
 	/* Set ADC debounce time */
 	max77693_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
 
-	/* Detect accessory on boot */
-	max77693_muic_detect_accessory(info);
+	/*
+	 * Detect the accessory after platform initialization is complete.
+	 *
+	 * Use a delayed workqueue so that the cable state is detected and
+	 * notified to the notifiee/platform through a uevent only after the
+	 * platform has finished booting; only then should the extcon provider
+	 * driver report the cable state to the upper layer.
+	 */
+	INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
+	if (muic_pdata->detcable_delay_ms)
+		delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
+	else
+		delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+	schedule_delayed_work(&info->wq_detcable, delay_jiffies);
 
 	return ret;
 
+err_extcon:
+	extcon_dev_unregister(info->edev);
 err_irq:
 	while (--i >= 0)
 		free_irq(muic_irqs[i].virq, info);
@@ -780,6 +1232,7 @@
 	for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
 		free_irq(muic_irqs[i].virq, info);
 	cancel_work_sync(&info->irq_work);
+	input_unregister_device(info->dock);
 	extcon_dev_unregister(info->edev);
 
 	return 0;
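
The new path_uart/path_usb/detcable_delay_ms fields are taken from the MUIC platform data, so a board can override the CONTROL1_SW_UART/CONTROL1_SW_USB defaults and the 20-second boot-time detection delay. A hypothetical board-file fragment (the structure instance names are illustrative) might look like this:

/* Hypothetical board configuration for the max77693 MUIC: keep the
 * default H/W paths but shorten the cable-detection delay to 5 seconds. */
static struct max77693_muic_platform_data board_muic_pdata = {
	.path_uart		= CONTROL1_SW_UART,
	.path_usb		= CONTROL1_SW_USB,
	.detcable_delay_ms	= 5000,
};

static struct max77693_platform_data board_max77693_pdata = {
	.muic_data = &board_muic_pdata,
	/* ... other max77693 sub-device data ... */
};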
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 93009fe..e636d95 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -29,51 +29,14 @@
 #include <linux/irqdomain.h>
 
 #define	DEV_NAME			"max8997-muic"
+#define	DELAY_MS_DEFAULT		20000		/* unit: millisecond */
 
-/* MAX8997-MUIC STATUS1 register */
-#define STATUS1_ADC_SHIFT		0
-#define STATUS1_ADCLOW_SHIFT		5
-#define STATUS1_ADCERR_SHIFT		6
-#define STATUS1_ADC_MASK		(0x1f << STATUS1_ADC_SHIFT)
-#define STATUS1_ADCLOW_MASK		(0x1 << STATUS1_ADCLOW_SHIFT)
-#define STATUS1_ADCERR_MASK		(0x1 << STATUS1_ADCERR_SHIFT)
-
-/* MAX8997-MUIC STATUS2 register */
-#define STATUS2_CHGTYP_SHIFT		0
-#define STATUS2_CHGDETRUN_SHIFT		3
-#define STATUS2_DCDTMR_SHIFT		4
-#define STATUS2_DBCHG_SHIFT		5
-#define STATUS2_VBVOLT_SHIFT		6
-#define STATUS2_CHGTYP_MASK		(0x7 << STATUS2_CHGTYP_SHIFT)
-#define STATUS2_CHGDETRUN_MASK		(0x1 << STATUS2_CHGDETRUN_SHIFT)
-#define STATUS2_DCDTMR_MASK		(0x1 << STATUS2_DCDTMR_SHIFT)
-#define STATUS2_DBCHG_MASK		(0x1 << STATUS2_DBCHG_SHIFT)
-#define STATUS2_VBVOLT_MASK		(0x1 << STATUS2_VBVOLT_SHIFT)
-
-/* MAX8997-MUIC STATUS3 register */
-#define STATUS3_OVP_SHIFT		2
-#define STATUS3_OVP_MASK		(0x1 << STATUS3_OVP_SHIFT)
-
-/* MAX8997-MUIC CONTROL1 register */
-#define COMN1SW_SHIFT			0
-#define COMP2SW_SHIFT			3
-#define COMN1SW_MASK			(0x7 << COMN1SW_SHIFT)
-#define COMP2SW_MASK			(0x7 << COMP2SW_SHIFT)
-#define SW_MASK				(COMP2SW_MASK | COMN1SW_MASK)
-
-#define MAX8997_SW_USB		((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
-#define MAX8997_SW_AUDIO	((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
-#define MAX8997_SW_UART		((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
-#define MAX8997_SW_OPEN		((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
-
-#define	MAX8997_ADC_GROUND		0x00
-#define	MAX8997_ADC_MHL			0x01
-#define	MAX8997_ADC_JIG_USB_1		0x18
-#define	MAX8997_ADC_JIG_USB_2		0x19
-#define	MAX8997_ADC_DESKDOCK		0x1a
-#define	MAX8997_ADC_JIG_UART		0x1c
-#define	MAX8997_ADC_CARDOCK		0x1d
-#define	MAX8997_ADC_OPEN		0x1f
+enum max8997_muic_adc_debounce_time {
+	ADC_DEBOUNCE_TIME_0_5MS = 0,	/* 0.5ms */
+	ADC_DEBOUNCE_TIME_10MS,		/* 10ms */
+	ADC_DEBOUNCE_TIME_25MS,		/* 25ms */
+	ADC_DEBOUNCE_TIME_38_62MS,	/* 38.62ms */
+};
 
 struct max8997_muic_irq {
 	unsigned int irq;
@@ -82,61 +45,303 @@
 };
 
 static struct max8997_muic_irq muic_irqs[] = {
-	{ MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
-	{ MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
-	{ MAX8997_MUICIRQ_ADC, "muic-ADC" },
-	{ MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
-	{ MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
-	{ MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
-	{ MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
-	{ MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
-	{ MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
+	{ MAX8997_MUICIRQ_ADCError,	"muic-ADCERROR" },
+	{ MAX8997_MUICIRQ_ADCLow,	"muic-ADCLOW" },
+	{ MAX8997_MUICIRQ_ADC,		"muic-ADC" },
+	{ MAX8997_MUICIRQ_VBVolt,	"muic-VBVOLT" },
+	{ MAX8997_MUICIRQ_DBChg,	"muic-DBCHG" },
+	{ MAX8997_MUICIRQ_DCDTmr,	"muic-DCDTMR" },
+	{ MAX8997_MUICIRQ_ChgDetRun,	"muic-CHGDETRUN" },
+	{ MAX8997_MUICIRQ_ChgTyp,	"muic-CHGTYP" },
+	{ MAX8997_MUICIRQ_OVP,		"muic-OVP" },
+};
+
+/* Define supported cable type */
+enum max8997_muic_acc_type {
+	MAX8997_MUIC_ADC_GROUND = 0x0,
+	MAX8997_MUIC_ADC_MHL,			/* MHL*/
+	MAX8997_MUIC_ADC_REMOTE_S1_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S2_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S3_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S4_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S5_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S6_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S7_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S8_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S9_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S10_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S11_BUTTON,
+	MAX8997_MUIC_ADC_REMOTE_S12_BUTTON,
+	MAX8997_MUIC_ADC_RESERVED_ACC_1,
+	MAX8997_MUIC_ADC_RESERVED_ACC_2,
+	MAX8997_MUIC_ADC_RESERVED_ACC_3,
+	MAX8997_MUIC_ADC_RESERVED_ACC_4,
+	MAX8997_MUIC_ADC_RESERVED_ACC_5,
+	MAX8997_MUIC_ADC_CEA936_AUDIO,
+	MAX8997_MUIC_ADC_PHONE_POWERED_DEV,
+	MAX8997_MUIC_ADC_TTY_CONVERTER,
+	MAX8997_MUIC_ADC_UART_CABLE,
+	MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG,
+	MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF,	/* JIG-USB-OFF */
+	MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON,	/* JIG-USB-ON */
+	MAX8997_MUIC_ADC_AV_CABLE_NOLOAD,	/* DESKDOCK */
+	MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG,
+	MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF,	/* JIG-UART */
+	MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON,	/* CARDOCK */
+	MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE,
+	MAX8997_MUIC_ADC_OPEN,			/* OPEN */
+};
+
+enum max8997_muic_cable_group {
+	MAX8997_CABLE_GROUP_ADC = 0,
+	MAX8997_CABLE_GROUP_ADC_GND,
+	MAX8997_CABLE_GROUP_CHG,
+	MAX8997_CABLE_GROUP_VBVOLT,
+};
+
+enum max8997_muic_usb_type {
+	MAX8997_USB_HOST,
+	MAX8997_USB_DEVICE,
+};
+
+enum max8997_muic_charger_type {
+	MAX8997_CHARGER_TYPE_NONE = 0,
+	MAX8997_CHARGER_TYPE_USB,
+	MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
+	MAX8997_CHARGER_TYPE_DEDICATED_CHG,
+	MAX8997_CHARGER_TYPE_500MA,
+	MAX8997_CHARGER_TYPE_1A,
+	MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
 };
 
 struct max8997_muic_info {
 	struct device *dev;
 	struct i2c_client *muic;
-	struct max8997_muic_platform_data *muic_pdata;
+	struct extcon_dev *edev;
+	int prev_cable_type;
+	int prev_chg_type;
+	u8 status[2];
 
 	int irq;
 	struct work_struct irq_work;
-
-	enum max8997_muic_charger_type pre_charger_type;
-	int pre_adc;
-
 	struct mutex mutex;
 
-	struct extcon_dev	*edev;
+	struct max8997_muic_platform_data *muic_pdata;
+	enum max8997_muic_charger_type pre_charger_type;
+
+	/*
+	 * Use a delayed workqueue to detect the cable state and notify it to
+	 * the notifiee/platform through a uevent. Once the platform has
+	 * finished booting, the extcon provider driver should report the
+	 * cable state to the upper layer.
+	 */
+	struct delayed_work wq_detcable;
+
+	/*
+	 * Default USB/UART path (either UART/USB or AUX_UART/AUX_USB) for the
+	 * COMP2/COMN1 H/W lines controlled by the CONTROL1 register.
+	 */
+	int path_usb;
+	int path_uart;
 };
 
-const char *max8997_extcon_cable[] = {
-	[0] = "USB",
-	[1] = "USB-Host",
-	[2] = "TA",
-	[3] = "Fast-charger",
-	[4] = "Slow-charger",
-	[5] = "Charge-downstream",
-	[6] = "MHL",
-	[7] = "Dock-desk",
-	[8] = "Dock-card",
-	[9] = "JIG",
+enum {
+	EXTCON_CABLE_USB = 0,
+	EXTCON_CABLE_USB_HOST,
+	EXTCON_CABLE_TA,
+	EXTCON_CABLE_FAST_CHARGER,
+	EXTCON_CABLE_SLOW_CHARGER,
+	EXTCON_CABLE_CHARGE_DOWNSTREAM,
+	EXTCON_CABLE_MHL,
+	EXTCON_CABLE_DOCK_DESK,
+	EXTCON_CABLE_DOCK_CARD,
+	EXTCON_CABLE_JIG,
+
+	_EXTCON_CABLE_NUM,
+};
+
+static const char *max8997_extcon_cable[] = {
+	[EXTCON_CABLE_USB]			= "USB",
+	[EXTCON_CABLE_USB_HOST]			= "USB-Host",
+	[EXTCON_CABLE_TA]			= "TA",
+	[EXTCON_CABLE_FAST_CHARGER]		= "Fast-charger",
+	[EXTCON_CABLE_SLOW_CHARGER]		= "Slow-charger",
+	[EXTCON_CABLE_CHARGE_DOWNSTREAM]	= "Charge-downstream",
+	[EXTCON_CABLE_MHL]			= "MHL",
+	[EXTCON_CABLE_DOCK_DESK]		= "Dock-Desk",
+	[EXTCON_CABLE_DOCK_CARD]		= "Dock-Card",
+	[EXTCON_CABLE_JIG]			= "JIG",
 
 	NULL,
 };
 
+/*
+ * max8997_muic_set_debounce_time - Set the debounce time of ADC
+ * @info: the instance including private data of max8997 MUIC
+ * @time: the debounce time of ADC
+ */
+static int max8997_muic_set_debounce_time(struct max8997_muic_info *info,
+		enum max8997_muic_adc_debounce_time time)
+{
+	int ret;
+
+	switch (time) {
+	case ADC_DEBOUNCE_TIME_0_5MS:
+	case ADC_DEBOUNCE_TIME_10MS:
+	case ADC_DEBOUNCE_TIME_25MS:
+	case ADC_DEBOUNCE_TIME_38_62MS:
+		ret = max8997_update_reg(info->muic,
+					  MAX8997_MUIC_REG_CONTROL3,
+					  time << CONTROL3_ADCDBSET_SHIFT,
+					  CONTROL3_ADCDBSET_MASK);
+		if (ret) {
+			dev_err(info->dev, "failed to set ADC debounce time\n");
+			return -EAGAIN;
+		}
+		break;
+	default:
+		dev_err(info->dev, "invalid ADC debounce time\n");
+		return -EINVAL;
+	}
+
+	return 0;
+};
+
+/*
+ * max8997_muic_set_path - Set hardware line according to attached cable
+ * @info: the instance including private data of max8997 MUIC
+ * @val: the path according to the attached cable
+ * @attached: the state of the cable (true: attached, false: detached)
+ *
+ * The max8997 MUIC device shares its external H/W lines among a variety of
+ * cables, so this function sets the internal H/W line path according to the
+ * type of attached cable.
+ */
+static int max8997_muic_set_path(struct max8997_muic_info *info,
+		u8 val, bool attached)
+{
+	int ret = 0;
+	u8 ctrl1, ctrl2 = 0;
+
+	if (attached)
+		ctrl1 = val;
+	else
+		ctrl1 = CONTROL1_SW_OPEN;
+
+	ret = max8997_update_reg(info->muic,
+			MAX8997_MUIC_REG_CONTROL1, ctrl1, COMP_SW_MASK);
+	if (ret < 0) {
+		dev_err(info->dev, "failed to update MUIC register\n");
+		return -EAGAIN;
+	}
+
+	if (attached)
+		ctrl2 |= CONTROL2_CPEN_MASK;	/* LowPwr=0, CPEn=1 */
+	else
+		ctrl2 |= CONTROL2_LOWPWR_MASK;	/* LowPwr=1, CPEn=0 */
+
+	ret = max8997_update_reg(info->muic,
+			MAX8997_MUIC_REG_CONTROL2, ctrl2,
+			CONTROL2_LOWPWR_MASK | CONTROL2_CPEN_MASK);
+	if (ret < 0) {
+		dev_err(info->dev, "failed to update MUIC register\n");
+		return -EAGAIN;
+	}
+
+	dev_info(info->dev,
+		"CONTROL1 : 0x%02x, CONTROL2 : 0x%02x, state : %s\n",
+		ctrl1, ctrl2, attached ? "attached" : "detached");
+
+	return 0;
+}
+
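+
The defines removed above show how a CONTROL1 path value is built: COMN1SW lives in bits 2..0, COMP2SW in bits 5..3, and both switches are set to the same selector for a given path. Assuming the shared CONTROL1_SW_* constants follow the same encoding, the values handed to max8997_muic_set_path() decode as:

/* Sketch of the CONTROL1 path encoding implied by the removed defines
 * (COMN1SW_SHIFT = 0, COMP2SW_SHIFT = 3); the CONTROL1_SW_* macros in the
 * shared max8997 header are assumed to use the same layout. */
#define MUIC_PATH(sel)	(((sel) << 3) | ((sel) << 0))

/* MUIC_PATH(0) == 0x00 -> open (no connection)
 * MUIC_PATH(1) == 0x09 -> USB   (DP/DM routed to the USB block)
 * MUIC_PATH(2) == 0x12 -> audio
 * MUIC_PATH(3) == 0x1b -> UART */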
+/*
+ * max8997_muic_get_cable_type - Return cable type and check cable state
+ * @info: the instance including private data of max8997 MUIC
+ * @group: the cable group of the attached cable
+ * @attached: set to the cable state (attached or detached) on return
+ *
+ * This function checks whether the cable is attached or detached and then
+ * returns the precise cable type according to the cable group:
+ *	- MAX8997_CABLE_GROUP_ADC
+ *	- MAX8997_CABLE_GROUP_CHG
+ */
+static int max8997_muic_get_cable_type(struct max8997_muic_info *info,
+		enum max8997_muic_cable_group group, bool *attached)
+{
+	int cable_type = 0;
+	int adc;
+	int chg_type;
+
+	switch (group) {
+	case MAX8997_CABLE_GROUP_ADC:
+		/*
+		 * Read ADC value to check cable type and decide cable state
+		 * according to cable type
+		 */
+		adc = info->status[0] & STATUS1_ADC_MASK;
+		adc >>= STATUS1_ADC_SHIFT;
+
+		/*
+		 * Check current cable state/cable type and store cable type
+		 * (info->prev_cable_type) for handling cable when cable is
+		 * detached.
+		 */
+		if (adc == MAX8997_MUIC_ADC_OPEN) {
+			*attached = false;
+
+			cable_type = info->prev_cable_type;
+			info->prev_cable_type = MAX8997_MUIC_ADC_OPEN;
+		} else {
+			*attached = true;
+
+			cable_type = info->prev_cable_type = adc;
+		}
+		break;
+	case MAX8997_CABLE_GROUP_CHG:
+		/*
+		 * Read charger type to check cable type and decide cable state
+		 * according to type of charger cable.
+		 */
+		chg_type = info->status[1] & STATUS2_CHGTYP_MASK;
+		chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+		if (chg_type == MAX8997_CHARGER_TYPE_NONE) {
+			*attached = false;
+
+			cable_type = info->prev_chg_type;
+			info->prev_chg_type = MAX8997_CHARGER_TYPE_NONE;
+		} else {
+			*attached = true;
+
+			/*
+			 * Check current cable state/cable type and store cable
+			 * type(info->prev_chg_type) for handling cable when
+			 * charger cable is detached.
+			 */
+			cable_type = info->prev_chg_type = chg_type;
+		}
+
+		break;
+	default:
+		dev_err(info->dev, "Unknown cable group (%d)\n", group);
+		cable_type = -EINVAL;
+		break;
+	}
+
+	return cable_type;
+}
+
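As a worked example of the caching above: when a TA charger is plugged in, STATUS2 reports MAX8997_CHARGER_TYPE_DEDICATED_CHG, so this function returns that type with *attached = true and stores it in prev_chg_type. When the charger is later removed, STATUS2 reads MAX8997_CHARGER_TYPE_NONE, so the function returns the cached DEDICATED_CHG type with *attached = false, which lets max8997_muic_chg_handler() clear the matching "TA" extcon state.
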
 static int max8997_muic_handle_usb(struct max8997_muic_info *info,
 			enum max8997_muic_usb_type usb_type, bool attached)
 {
 	int ret = 0;
 
 	if (usb_type == MAX8997_USB_HOST) {
-		/* switch to USB */
-		ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
-				attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
-				SW_MASK);
-		if (ret) {
+		ret = max8997_muic_set_path(info, info->path_usb, attached);
+		if (ret < 0) {
 			dev_err(info->dev, "failed to update muic register\n");
-			goto out;
+			return ret;
 		}
 	}
 
@@ -148,41 +353,39 @@
 		extcon_set_cable_state(info->edev, "USB", attached);
 		break;
 	default:
-		ret = -EINVAL;
-		break;
+		dev_err(info->dev, "failed to detect %s usb cable\n",
+			attached ? "attached" : "detached");
+		return -EINVAL;
 	}
 
-out:
-	return ret;
+	return 0;
 }
 
 static int max8997_muic_handle_dock(struct max8997_muic_info *info,
-			int adc, bool attached)
+			int cable_type, bool attached)
 {
 	int ret = 0;
 
-	/* switch to AUDIO */
-	ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
-				attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
-				SW_MASK);
+	ret = max8997_muic_set_path(info, CONTROL1_SW_AUDIO, attached);
 	if (ret) {
 		dev_err(info->dev, "failed to update muic register\n");
-		goto out;
+		return ret;
 	}
 
-	switch (adc) {
-	case MAX8997_ADC_DESKDOCK:
+	switch (cable_type) {
+	case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
 		extcon_set_cable_state(info->edev, "Dock-desk", attached);
 		break;
-	case MAX8997_ADC_CARDOCK:
+	case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
 		extcon_set_cable_state(info->edev, "Dock-card", attached);
 		break;
 	default:
-		ret = -EINVAL;
-		break;
+		dev_err(info->dev, "failed to detect %s dock device\n",
+			attached ? "attached" : "detached");
+		return -EINVAL;
 	}
-out:
-	return ret;
+
+	return 0;
 }
 
 static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
@@ -191,199 +394,188 @@
 	int ret = 0;
 
 	/* switch to UART */
-	ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
-				attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
-				SW_MASK);
+	ret = max8997_muic_set_path(info, info->path_uart, attached);
 	if (ret) {
 		dev_err(info->dev, "failed to update muic register\n");
-		goto out;
+		return -EINVAL;
 	}
 
 	extcon_set_cable_state(info->edev, "JIG", attached);
-out:
-	return ret;
+
+	return 0;
 }
 
-static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
+static int max8997_muic_adc_handler(struct max8997_muic_info *info)
 {
+	int cable_type;
+	bool attached;
 	int ret = 0;
 
-	switch (info->pre_adc) {
-	case MAX8997_ADC_GROUND:
-		ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
+	/* Check cable state which is either detached or attached */
+	cable_type = max8997_muic_get_cable_type(info,
+				MAX8997_CABLE_GROUP_ADC, &attached);
+
+	switch (cable_type) {
+	case MAX8997_MUIC_ADC_GROUND:
+		ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, attached);
+		if (ret < 0)
+			return ret;
 		break;
-	case MAX8997_ADC_MHL:
-		extcon_set_cable_state(info->edev, "MHL", false);
+	case MAX8997_MUIC_ADC_MHL:
+		extcon_set_cable_state(info->edev, "MHL", attached);
 		break;
-	case MAX8997_ADC_JIG_USB_1:
-	case MAX8997_ADC_JIG_USB_2:
-		ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
+	case MAX8997_MUIC_ADC_FACTORY_MODE_USB_OFF:
+	case MAX8997_MUIC_ADC_FACTORY_MODE_USB_ON:
+		ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, attached);
+		if (ret < 0)
+			return ret;
 		break;
-	case MAX8997_ADC_DESKDOCK:
-	case MAX8997_ADC_CARDOCK:
-		ret = max8997_muic_handle_dock(info, info->pre_adc, false);
+	case MAX8997_MUIC_ADC_AV_CABLE_NOLOAD:
+	case MAX8997_MUIC_ADC_FACTORY_MODE_UART_ON:
+		ret = max8997_muic_handle_dock(info, cable_type, attached);
+		if (ret < 0)
+			return ret;
 		break;
-	case MAX8997_ADC_JIG_UART:
-		ret = max8997_muic_handle_jig_uart(info, false);
+	case MAX8997_MUIC_ADC_FACTORY_MODE_UART_OFF:
+		ret = max8997_muic_handle_jig_uart(info, attached);
 		break;
+	case MAX8997_MUIC_ADC_REMOTE_S1_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S2_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S3_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S4_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S5_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S6_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S7_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S8_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S9_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S10_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S11_BUTTON:
+	case MAX8997_MUIC_ADC_REMOTE_S12_BUTTON:
+	case MAX8997_MUIC_ADC_RESERVED_ACC_1:
+	case MAX8997_MUIC_ADC_RESERVED_ACC_2:
+	case MAX8997_MUIC_ADC_RESERVED_ACC_3:
+	case MAX8997_MUIC_ADC_RESERVED_ACC_4:
+	case MAX8997_MUIC_ADC_RESERVED_ACC_5:
+	case MAX8997_MUIC_ADC_CEA936_AUDIO:
+	case MAX8997_MUIC_ADC_PHONE_POWERED_DEV:
+	case MAX8997_MUIC_ADC_TTY_CONVERTER:
+	case MAX8997_MUIC_ADC_UART_CABLE:
+	case MAX8997_MUIC_ADC_CEA936A_TYPE1_CHG:
+	case MAX8997_MUIC_ADC_CEA936A_TYPE2_CHG:
+	case MAX8997_MUIC_ADC_AUDIO_MODE_REMOTE:
+		/*
+		 * These cables are not handled in the general case. If such a
+		 * cable needs to be detected, implement the proper operation
+		 * for when it is attached/detached.
+		 */
+		dev_info(info->dev,
+			"cable is %s but it isn't used (type:0x%x)\n",
+			attached ? "attached" : "detached", cable_type);
+		return -EAGAIN;
 	default:
-		break;
-	}
-
-	return ret;
-}
-
-static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
-{
-	int ret = 0;
-
-	switch (adc) {
-	case MAX8997_ADC_GROUND:
-		ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
-		break;
-	case MAX8997_ADC_MHL:
-		extcon_set_cable_state(info->edev, "MHL", true);
-		break;
-	case MAX8997_ADC_JIG_USB_1:
-	case MAX8997_ADC_JIG_USB_2:
-		ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
-		break;
-	case MAX8997_ADC_DESKDOCK:
-	case MAX8997_ADC_CARDOCK:
-		ret = max8997_muic_handle_dock(info, adc, true);
-		break;
-	case MAX8997_ADC_JIG_UART:
-		ret = max8997_muic_handle_jig_uart(info, true);
-		break;
-	case MAX8997_ADC_OPEN:
-		ret = max8997_muic_handle_adc_detach(info);
-		break;
-	default:
-		ret = -EINVAL;
-		goto out;
-	}
-
-	info->pre_adc = adc;
-out:
-	return ret;
-}
-
-static int max8997_muic_handle_charger_type_detach(
-				struct max8997_muic_info *info)
-{
-	switch (info->pre_charger_type) {
-	case MAX8997_CHARGER_TYPE_USB:
-		extcon_set_cable_state(info->edev, "USB", false);
-		break;
-	case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
-		extcon_set_cable_state(info->edev, "Charge-downstream", false);
-		break;
-	case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
-		extcon_set_cable_state(info->edev, "TA", false);
-		break;
-	case MAX8997_CHARGER_TYPE_500MA:
-		extcon_set_cable_state(info->edev, "Slow-charger", false);
-		break;
-	case MAX8997_CHARGER_TYPE_1A:
-		extcon_set_cable_state(info->edev, "Fast-charger", false);
-		break;
-	default:
+		dev_err(info->dev,
+			"failed to detect %s unknown cable (type:0x%x)\n",
+			attached ? "attached" : "detached", cable_type);
 		return -EINVAL;
-		break;
 	}
 
 	return 0;
 }
 
-static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
-				enum max8997_muic_charger_type charger_type)
+static int max8997_muic_chg_handler(struct max8997_muic_info *info)
 {
-	u8 adc;
-	int ret;
+	int chg_type;
+	bool attached;
+	int adc;
 
-	ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
-	if (ret) {
-		dev_err(info->dev, "failed to read muic register\n");
-		goto out;
-	}
+	chg_type = max8997_muic_get_cable_type(info,
+				MAX8997_CABLE_GROUP_CHG, &attached);
 
-	switch (charger_type) {
+	switch (chg_type) {
 	case MAX8997_CHARGER_TYPE_NONE:
-		ret = max8997_muic_handle_charger_type_detach(info);
 		break;
 	case MAX8997_CHARGER_TYPE_USB:
-		if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
+		adc = info->status[0] & STATUS1_ADC_MASK;
+		adc >>= STATUS1_ADC_SHIFT;
+
+		if ((adc & STATUS1_ADC_MASK) == MAX8997_MUIC_ADC_OPEN) {
 			max8997_muic_handle_usb(info,
-					MAX8997_USB_DEVICE, true);
+					MAX8997_USB_DEVICE, attached);
 		}
 		break;
 	case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
-		extcon_set_cable_state(info->edev, "Charge-downstream", true);
+		extcon_set_cable_state(info->edev, "Charge-downstream", attached);
 		break;
 	case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
-		extcon_set_cable_state(info->edev, "TA", true);
+		extcon_set_cable_state(info->edev, "TA", attached);
 		break;
 	case MAX8997_CHARGER_TYPE_500MA:
-		extcon_set_cable_state(info->edev, "Slow-charger", true);
+		extcon_set_cable_state(info->edev, "Slow-charger", attached);
 		break;
 	case MAX8997_CHARGER_TYPE_1A:
-		extcon_set_cable_state(info->edev, "Fast-charger", true);
+		extcon_set_cable_state(info->edev, "Fast-charger", attached);
 		break;
 	default:
-		ret = -EINVAL;
-		goto out;
+		dev_err(info->dev,
+			"failed to detect %s unknown chg cable (type:0x%x)\n",
+			attached ? "attached" : "detached", chg_type);
+		return -EINVAL;
 	}
 
-	info->pre_charger_type = charger_type;
-out:
-	return ret;
+	return 0;
 }
 
 static void max8997_muic_irq_work(struct work_struct *work)
 {
 	struct max8997_muic_info *info = container_of(work,
 			struct max8997_muic_info, irq_work);
-	u8 status[2];
-	u8 adc, chg_type;
 	int irq_type = 0;
 	int i, ret;
 
+	if (!info->edev)
+		return;
+
 	mutex_lock(&info->mutex);
 
+	for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
+		if (info->irq == muic_irqs[i].virq)
+			irq_type = muic_irqs[i].irq;
+
 	ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
-				2, status);
+				2, info->status);
 	if (ret) {
 		dev_err(info->dev, "failed to read muic register\n");
 		mutex_unlock(&info->mutex);
 		return;
 	}
 
-	dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
-			status[0], status[1]);
-
-	for (i = 0 ; i < ARRAY_SIZE(muic_irqs) ; i++)
-		if (info->irq == muic_irqs[i].virq)
-			irq_type = muic_irqs[i].irq;
-
 	switch (irq_type) {
+	case MAX8997_MUICIRQ_ADCError:
+	case MAX8997_MUICIRQ_ADCLow:
 	case MAX8997_MUICIRQ_ADC:
-		adc = status[0] & STATUS1_ADC_MASK;
-		adc >>= STATUS1_ADC_SHIFT;
-
-		max8997_muic_handle_adc(info, adc);
+		/* Handle all of cable except for charger cable */
+		ret = max8997_muic_adc_handler(info);
 		break;
+	case MAX8997_MUICIRQ_VBVolt:
+	case MAX8997_MUICIRQ_DBChg:
+	case MAX8997_MUICIRQ_DCDTmr:
+	case MAX8997_MUICIRQ_ChgDetRun:
 	case MAX8997_MUICIRQ_ChgTyp:
-		chg_type = status[1] & STATUS2_CHGTYP_MASK;
-		chg_type >>= STATUS2_CHGTYP_SHIFT;
-
-		max8997_muic_handle_charger_type(info, chg_type);
+		/* Handle charger cable */
+		ret = max8997_muic_chg_handler(info);
+		break;
+	case MAX8997_MUICIRQ_OVP:
 		break;
 	default:
 		dev_info(info->dev, "misc interrupt: irq %d occurred\n",
 				irq_type);
-		break;
+		mutex_unlock(&info->mutex);
+		return;
 	}
 
+	if (ret < 0)
+		dev_err(info->dev, "failed to handle MUIC interrupt\n");
+
 	mutex_unlock(&info->mutex);
 
 	return;
@@ -401,29 +593,60 @@
 	return IRQ_HANDLED;
 }
 
-static void max8997_muic_detect_dev(struct max8997_muic_info *info)
+static int max8997_muic_detect_dev(struct max8997_muic_info *info)
 {
-	int ret;
-	u8 status[2], adc, chg_type;
+	int ret = 0;
+	int adc;
+	int chg_type;
+	bool attached;
 
-	ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
-				2, status);
+	mutex_lock(&info->mutex);
+
+	/* Read STATUSx register to detect accessory */
+	ret = max8997_bulk_read(info->muic,
+			MAX8997_MUIC_REG_STATUS1, 2, info->status);
 	if (ret) {
-		dev_err(info->dev, "failed to read muic register\n");
-		return;
+		dev_err(info->dev, "failed to read MUIC register\n");
+		mutex_unlock(&info->mutex);
+		return -EINVAL;
 	}
 
-	dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
-			status[0], status[1]);
+	adc = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_ADC,
+					&attached);
+	if (attached && adc != MAX8997_MUIC_ADC_OPEN) {
+		ret = max8997_muic_adc_handler(info);
+		if (ret < 0) {
+			dev_err(info->dev, "Cannot detect ADC cable\n");
+			mutex_unlock(&info->mutex);
+			return ret;
+		}
+	}
 
-	adc = status[0] & STATUS1_ADC_MASK;
-	adc >>= STATUS1_ADC_SHIFT;
+	chg_type = max8997_muic_get_cable_type(info, MAX8997_CABLE_GROUP_CHG,
+					&attached);
+	if (attached && chg_type != MAX8997_CHARGER_TYPE_NONE) {
+		ret = max8997_muic_chg_handler(info);
+		if (ret < 0) {
+			dev_err(info->dev, "Cannot detect charger cable\n");
+			mutex_unlock(&info->mutex);
+			return ret;
+		}
+	}
 
-	chg_type = status[1] & STATUS2_CHGTYP_MASK;
-	chg_type >>= STATUS2_CHGTYP_SHIFT;
+	mutex_unlock(&info->mutex);
 
-	max8997_muic_handle_adc(info, adc);
-	max8997_muic_handle_charger_type(info, chg_type);
+	return 0;
+}
+
+static void max8997_muic_detect_cable_wq(struct work_struct *work)
+{
+	struct max8997_muic_info *info = container_of(to_delayed_work(work),
+				struct max8997_muic_info, wq_detcable);
+	int ret;
+
+	ret = max8997_muic_detect_dev(info);
+	if (ret < 0)
+		pr_err("failed to detect cable type\n");
 }
 
 static int max8997_muic_probe(struct platform_device *pdev)
@@ -431,6 +654,7 @@
 	struct max8997_dev *max8997 = dev_get_drvdata(pdev->dev.parent);
 	struct max8997_platform_data *pdata = dev_get_platdata(max8997->dev);
 	struct max8997_muic_info *info;
+	int delay_jiffies;
 	int ret, i;
 
 	info = devm_kzalloc(&pdev->dev, sizeof(struct max8997_muic_info),
@@ -459,8 +683,10 @@
 		}
 		muic_irq->virq = virq;
 
-		ret = request_threaded_irq(virq, NULL, max8997_muic_irq_handler,
-				0, muic_irq->name, info);
+		ret = request_threaded_irq(virq, NULL,
+				max8997_muic_irq_handler,
+				IRQF_NO_SUSPEND,
+				muic_irq->name, info);
 		if (ret) {
 			dev_err(&pdev->dev,
 				"failed: irq request (IRQ: %d,"
@@ -496,10 +722,42 @@
 		}
 	}
 
-	/* Initial device detection */
-	max8997_muic_detect_dev(info);
+	/*
+	 * Choose the default USB/UART path (either UART/USB or
+	 * AUX_UART/AUX_USB) for the COMP2/COMN1 H/W lines controlled by the
+	 * CONTROL1 register.
+	 */
+	if (pdata->muic_pdata->path_uart)
+		info->path_uart = pdata->muic_pdata->path_uart;
+	else
+		info->path_uart = CONTROL1_SW_UART;
 
-	return ret;
+	if (pdata->muic_pdata->path_usb)
+		info->path_usb = pdata->muic_pdata->path_usb;
+	else
+		info->path_usb = CONTROL1_SW_USB;
+
+	/* Set initial path for UART */
+	max8997_muic_set_path(info, info->path_uart, true);
+
+	/* Set ADC debounce time */
+	max8997_muic_set_debounce_time(info, ADC_DEBOUNCE_TIME_25MS);
+
+	/*
+	 * Detect the accessory after platform initialization is complete.
+	 *
+	 * Use a delayed workqueue so that the cable state is detected and
+	 * notified to the notifiee/platform through a uevent only after the
+	 * platform has finished booting; only then should the extcon provider
+	 * driver report the cable state to the upper layer.
+	 */
+	INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
+	if (pdata->muic_pdata->detcable_delay_ms)
+		delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
+	else
+		delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
+	schedule_delayed_work(&info->wq_detcable, delay_jiffies);
+
+	return 0;
 
 err_irq:
 	while (--i >= 0)
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 3d62781..aa3fec0 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -568,8 +568,7 @@
 
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Mouse guid */
-	{ VMBUS_DEVICE(0x9E, 0xB6, 0xA8, 0xCF, 0x4A, 0x5B, 0xc0, 0x4c,
-		       0xB9, 0x8B, 0x8B, 0xA1, 0xA1, 0xF3, 0xF9, 0x5A) },
+	{ HV_MOUSE_GUID, },
 	{ },
 };
 
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f2..0b122f8 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -55,7 +55,7 @@
 					[channel->monitor_grp].pending);
 
 	} else {
-		vmbus_set_event(channel->offermsg.child_relid);
+		vmbus_set_event(channel);
 	}
 }
 
@@ -181,7 +181,7 @@
 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
 	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
 						  PAGE_SHIFT;
-	open_msg->server_contextarea_gpadlhandle = 0;
+	open_msg->target_vp = newchannel->target_vp;
 
 	if (userdatalen > MAX_USER_DEFINED_BYTES) {
 		err = -EINVAL;
@@ -564,6 +564,7 @@
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
 	int ret;
+	bool signal = false;
 
 
 	/* Setup the descriptor */
@@ -580,9 +581,9 @@
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -606,6 +607,7 @@
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -641,9 +643,9 @@
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -665,6 +667,7 @@
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -703,9 +706,9 @@
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -732,6 +735,7 @@
 	u32 packetlen;
 	u32 userlen;
 	int ret;
+	bool signal = false;
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
@@ -758,8 +762,10 @@
 
 	/* Copy over the packet to the user buffer */
 	ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
-			     (desc.offset8 << 3));
+			     (desc.offset8 << 3), &signal);
 
+	if (signal)
+		vmbus_setevent(channel);
 
 	return 0;
 }
@@ -774,8 +780,8 @@
 {
 	struct vmpacket_descriptor desc;
 	u32 packetlen;
-	u32 userlen;
 	int ret;
+	bool signal = false;
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
@@ -788,7 +794,6 @@
 
 
 	packetlen = desc.len8 << 3;
-	userlen = packetlen - (desc.offset8 << 3);
 
 	*buffer_actual_len = packetlen;
 
@@ -802,7 +807,11 @@
 	*requestid = desc.trans_id;
 
 	/* Copy over the entire packet to the user buffer */
-	ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+	ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
+				 &signal);
+
+	if (signal)
+		vmbus_setevent(channel);
 
 	return 0;
 }
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f84c5c..53a8600 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -257,6 +257,70 @@
 	}
 }
 
+enum {
+	IDE = 0,
+	SCSI,
+	NIC,
+	MAX_PERF_CHN,
+};
+
+/*
+ * This is an array of device_ids (device types) that are performance critical.
+ * We attempt to distribute the interrupt load for these devices across
+ * all available CPUs.
+ */
+static const struct hv_vmbus_device_id hp_devs[] = {
+	/* IDE */
+	{ HV_IDE_GUID, },
+	/* Storage - SCSI */
+	{ HV_SCSI_GUID, },
+	/* Network */
+	{ HV_NIC_GUID, },
+};
+
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static u32  next_vp;
+
+/*
+ * Starting with Win8, we can statically distribute the incoming
+ * channel interrupt load by binding a channel to VCPU. We
+ * implement here a simple round robin scheme for distributing
+ * the interrupt load.
+ * We will bind channels that are not performance critical to cpu 0 and
+ * performance critical channels (IDE, SCSI and Network) will be uniformly
+ * distributed across all available CPUs.
+ */
+static u32 get_vp_index(uuid_le *type_guid)
+{
+	u32 cur_cpu;
+	int i;
+	bool perf_chn = false;
+	u32 max_cpus = num_online_cpus();
+
+	for (i = IDE; i < MAX_PERF_CHN; i++) {
+		if (!memcmp(type_guid->b, hp_devs[i].guid,
+				 sizeof(uuid_le))) {
+			perf_chn = true;
+			break;
+		}
+	}
+	if ((vmbus_proto_version == VERSION_WS2008) ||
+	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+		/*
+		 * Prior to win8, all channel interrupts are
+		 * delivered on cpu 0.
+		 * Also if the channel is not a performance critical
+		 * channel, bind it to cpu 0.
+		 */
+		return 0;
+	}
+	cur_cpu = (++next_vp % max_cpus);
+	return hv_context.vp_index[cur_cpu];
+}
+
 /*
  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
  *
@@ -275,6 +339,35 @@
 		return;
 	}
 
+	/*
+	 * By default we setup state to enable batched
+	 * reading. A specific service can choose to
+	 * disable this prior to opening the channel.
+	 */
+	newchannel->batched_reading = true;
+
+	/*
+	 * Setup state for signalling the host.
+	 */
+	newchannel->sig_event = (struct hv_input_signal_event *)
+				(ALIGN((unsigned long)
+				&newchannel->sig_buf,
+				HV_HYPERCALL_PARAM_ALIGN));
+
+	newchannel->sig_event->connectionid.asu32 = 0;
+	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
+	newchannel->sig_event->flag_number = 0;
+	newchannel->sig_event->rsvdz = 0;
+
+	if (vmbus_proto_version != VERSION_WS2008) {
+		newchannel->is_dedicated_interrupt =
+				(offer->is_dedicated_interrupt != 0);
+		newchannel->sig_event->connectionid.u.id =
+				offer->connection_id;
+	}
+
+	newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+
 	memcpy(&newchannel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));
 	newchannel->monitor_grp = (u8)offer->monitorid / 32;
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 650c9f0..253a74b 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
+#include <linux/export.h>
 #include <asm/hyperv.h>
 #include "hyperv_vmbus.h"
 
@@ -40,15 +41,99 @@
 };
 
 /*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+static __u32 vmbus_get_next_version(__u32 current_version)
+{
+	switch (current_version) {
+	case (VERSION_WIN7):
+		return VERSION_WS2008;
+
+	case (VERSION_WIN8):
+		return VERSION_WIN7;
+
+	case (VERSION_WS2008):
+	default:
+		return VERSION_INVAL;
+	}
+}
+
+static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+					__u32 version)
+{
+	int ret = 0;
+	struct vmbus_channel_initiate_contact *msg;
+	unsigned long flags;
+	int t;
+
+	init_completion(&msginfo->waitevent);
+
+	msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+	msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+	msg->vmbus_version_requested = version;
+	msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
+	msg->monitor_page2 = virt_to_phys(
+			(void *)((unsigned long)vmbus_connection.monitor_pages +
+				 PAGE_SIZE));
+
+	/*
+	 * Add to list before we send the request since we may
+	 * receive the response before returning from this routine
+	 */
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_add_tail(&msginfo->msglistentry,
+		      &vmbus_connection.chn_msg_list);
+
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	ret = vmbus_post_msg(msg,
+			       sizeof(struct vmbus_channel_initiate_contact));
+	if (ret != 0) {
+		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+		list_del(&msginfo->msglistentry);
+		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+					flags);
+		return ret;
+	}
+
+	/* Wait for the connection response */
+	t =  wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+	if (t == 0) {
+		spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+				flags);
+		list_del(&msginfo->msglistentry);
+		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+					flags);
+		return -ETIMEDOUT;
+	}
+
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_del(&msginfo->msglistentry);
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	/* Check if successful */
+	if (msginfo->response.version_response.version_supported) {
+		vmbus_connection.conn_state = CONNECTED;
+	} else {
+		return -ECONNREFUSED;
+	}
+
+	return ret;
+}
+
+/*
  * vmbus_connect - Sends a connect request on the partition service connection
  */
 int vmbus_connect(void)
 {
 	int ret = 0;
-	int t;
 	struct vmbus_channel_msginfo *msginfo = NULL;
-	struct vmbus_channel_initiate_contact *msg;
-	unsigned long flags;
+	__u32 version;
 
 	/* Initialize the vmbus connection */
 	vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@
 		goto cleanup;
 	}
 
-	init_completion(&msginfo->waitevent);
-
-	msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
-
-	msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
-	msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
-	msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
-	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
-	msg->monitor_page2 = virt_to_phys(
-			(void *)((unsigned long)vmbus_connection.monitor_pages +
-				 PAGE_SIZE));
-
 	/*
-	 * Add to list before we send the request since we may
-	 * receive the response before returning from this routine
+	 * Negotiate a compatible VMBUS version number with the
+	 * host. We start with the highest number we can support
+	 * and work our way down until we negotiate a compatible
+	 * version.
 	 */
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_add_tail(&msginfo->msglistentry,
-		      &vmbus_connection.chn_msg_list);
 
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+	version = VERSION_CURRENT;
 
-	ret = vmbus_post_msg(msg,
-			       sizeof(struct vmbus_channel_initiate_contact));
-	if (ret != 0) {
-		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-		list_del(&msginfo->msglistentry);
-		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
-					flags);
+	do {
+		ret = vmbus_negotiate_version(msginfo, version);
+		if (ret == 0)
+			break;
+
+		version = vmbus_get_next_version(version);
+	} while (version != VERSION_INVAL);
+
+	if (version == VERSION_INVAL)
 		goto cleanup;
-	}
 
-	/* Wait for the connection response */
-	t =  wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
-	if (t == 0) {
-		spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
-				flags);
-		list_del(&msginfo->msglistentry);
-		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
-					flags);
-		ret = -ETIMEDOUT;
-		goto cleanup;
-	}
-
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_del(&msginfo->msglistentry);
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
-	/* Check if successful */
-	if (msginfo->response.version_response.version_supported) {
-		vmbus_connection.conn_state = CONNECTED;
-	} else {
-		pr_err("Unable to connect, "
-			"Version %d not supported by Hyper-V\n",
-			VMBUS_REVISION_NUMBER);
-		ret = -ECONNREFUSED;
-		goto cleanup;
-	}
+	vmbus_proto_version = version;
+	pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
+		    host_info_eax, host_info_ebx >> 16,
+		    host_info_ebx & 0xFFFF, host_info_ecx,
+		    host_info_edx >> 24, host_info_edx & 0xFFFFFF,
+		    version >> 16, version & 0xFFFF);
 
 	kfree(msginfo);
 	return 0;
 
 cleanup:
+	pr_err("Unable to connect to host\n");
 	vmbus_connection.conn_state = DISCONNECTED;
 
 	if (vmbus_connection.work_queue)
@@ -212,6 +266,9 @@
 {
 	struct vmbus_channel *channel;
 	unsigned long flags;
+	void *arg;
+	bool read_state;
+	u32 bytes_to_read;
 
 	/*
 	 * Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@
 	 */
 
 	spin_lock_irqsave(&channel->inbound_lock, flags);
-	if (channel->onchannel_callback != NULL)
-		channel->onchannel_callback(channel->channel_callback_context);
-	else
+	if (channel->onchannel_callback != NULL) {
+		arg = channel->channel_callback_context;
+		read_state = channel->batched_reading;
+		/*
+		 * This callback reads the messages sent by the host.
+		 * We can optimize host to guest signaling by ensuring:
+		 * 1. While reading the channel, we disable interrupts from
+		 *    host.
+		 * 2. Ensure that we process all posted messages from the host
+		 *    before returning from this callback.
+		 * 3. Once we return, enable signaling from the host. Once this
+		 *    state is set we check to see if additional packets are
+		 *    available to read. In this case we repeat the process.
+		 */
+
+		do {
+			hv_begin_read(&channel->inbound);
+			channel->onchannel_callback(arg);
+			bytes_to_read = hv_end_read(&channel->inbound);
+		} while (read_state && (bytes_to_read != 0));
+	} else {
 		pr_err("no channel callback for relid - %u\n", relid);
+	}
 
 	spin_unlock_irqrestore(&channel->inbound_lock, flags);
 }
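
The batched-reading loop above assumes the per-channel callback drains everything currently in the ring before returning; otherwise hv_end_read() keeps reporting pending bytes. A minimal, hypothetical client callback built on the public vmbus_recvpacket() helper would be:

/* Hypothetical vmbus client callback: drain all pending inband packets.
 * The buffer size and the packet processing are placeholders. */
static void sample_onchannel_callback(void *context)
{
	struct vmbus_channel *channel = context;
	u8 buf[256];
	u32 buflen;
	u64 req_id;

	while (vmbus_recvpacket(channel, buf, sizeof(buf),
				&buflen, &req_id) == 0 && buflen != 0) {
		/* process 'buflen' bytes of payload for request 'req_id' */
	}
}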
@@ -248,10 +324,32 @@
 void vmbus_on_event(unsigned long data)
 {
 	u32 dword;
-	u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+	u32 maxdword;
 	int bit;
 	u32 relid;
-	u32 *recv_int_page = vmbus_connection.recv_int_page;
+	u32 *recv_int_page = NULL;
+	void *page_addr;
+	int cpu = smp_processor_id();
+	union hv_synic_event_flags *event;
+
+	if ((vmbus_proto_version == VERSION_WS2008) ||
+		(vmbus_proto_version == VERSION_WIN7)) {
+		maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+		recv_int_page = vmbus_connection.recv_int_page;
+	} else {
+		/*
+		 * When the host is win8 and beyond, the event page
+		 * can be directly checked to get the id of the channel
+		 * that has the interrupt pending.
+		 */
+		maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
+		page_addr = hv_context.synic_event_page[cpu];
+		event = (union hv_synic_event_flags *)page_addr +
+						 VMBUS_MESSAGE_SINT;
+		recv_int_page = event->flags32;
+	}
 
 	/* Check events */
 	if (!recv_int_page)
@@ -307,12 +405,16 @@
 /*
  * vmbus_set_event - Send an event notification to the parent
  */
-int vmbus_set_event(u32 child_relid)
+int vmbus_set_event(struct vmbus_channel *channel)
 {
-	/* Each u32 represents 32 channels */
-	sync_set_bit(child_relid & 31,
-		(unsigned long *)vmbus_connection.send_int_page +
-		(child_relid >> 5));
+	u32 child_relid = channel->offermsg.child_relid;
 
-	return hv_signal_event();
+	if (!channel->is_dedicated_interrupt) {
+		/* Each u32 represents 32 channels */
+		sync_set_bit(child_relid & 31,
+			(unsigned long *)vmbus_connection.send_int_page +
+			(child_relid >> 5));
+	}
+
+	return hv_signal_event(channel->sig_event);
 }
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3648f8f..1c5481d 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -27,6 +27,7 @@
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
 #include <linux/version.h>
+#include <linux/interrupt.h>
 #include <asm/hyperv.h>
 #include "hyperv_vmbus.h"
 
@@ -34,13 +35,16 @@
 struct hv_context hv_context = {
 	.synic_initialized	= false,
 	.hypercall_page		= NULL,
-	.signal_event_param	= NULL,
-	.signal_event_buffer	= NULL,
 };
 
 /*
  * query_hypervisor_info - Get version info of the windows hypervisor
  */
+unsigned int host_info_eax;
+unsigned int host_info_ebx;
+unsigned int host_info_ecx;
+unsigned int host_info_edx;
+
 static int query_hypervisor_info(void)
 {
 	unsigned int eax;
@@ -70,13 +74,10 @@
 		edx = 0;
 		op = HVCPUID_VERSION;
 		cpuid(op, &eax, &ebx, &ecx, &edx);
-		pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
-			    eax,
-			    ebx >> 16,
-			    ebx & 0xFFFF,
-			    ecx,
-			    edx >> 24,
-			    edx & 0xFFFFFF);
+		host_info_eax = eax;
+		host_info_ebx = ebx;
+		host_info_ecx = ecx;
+		host_info_edx = edx;
 	}
 	return max_leaf;
 }
@@ -137,6 +138,10 @@
 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
 	memset(hv_context.synic_message_page, 0,
 	       sizeof(void *) * NR_CPUS);
+	memset(hv_context.vp_index, 0,
+	       sizeof(int) * NR_CPUS);
+	memset(hv_context.event_dpc, 0,
+	       sizeof(void *) * NR_CPUS);
 
 	max_leaf = query_hypervisor_info();
 
@@ -168,24 +173,6 @@
 
 	hv_context.hypercall_page = virtaddr;
 
-	/* Setup the global signal event param for the signal event hypercall */
-	hv_context.signal_event_buffer =
-			kmalloc(sizeof(struct hv_input_signal_event_buffer),
-				GFP_KERNEL);
-	if (!hv_context.signal_event_buffer)
-		goto cleanup;
-
-	hv_context.signal_event_param =
-		(struct hv_input_signal_event *)
-			(ALIGN((unsigned long)
-				  hv_context.signal_event_buffer,
-				  HV_HYPERCALL_PARAM_ALIGN));
-	hv_context.signal_event_param->connectionid.asu32 = 0;
-	hv_context.signal_event_param->connectionid.u.id =
-						VMBUS_EVENT_CONNECTION_ID;
-	hv_context.signal_event_param->flag_number = 0;
-	hv_context.signal_event_param->rsvdz = 0;
-
 	return 0;
 
 cleanup:
@@ -213,10 +200,6 @@
 	/* Reset our OS id */
 	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
 
-	kfree(hv_context.signal_event_buffer);
-	hv_context.signal_event_buffer = NULL;
-	hv_context.signal_event_param = NULL;
-
 	if (hv_context.hypercall_page) {
 		hypercall_msr.as_uint64 = 0;
 		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@
  *
  * This involves a hypercall.
  */
-u16 hv_signal_event(void)
+u16 hv_signal_event(void *con_id)
 {
 	u16 status;
 
-	status = do_hypercall(HVCALL_SIGNAL_EVENT,
-			       hv_context.signal_event_param,
-			       NULL) & 0xFFFF;
+	status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
+
 	return status;
 }
 
@@ -297,6 +279,7 @@
 	union hv_synic_siefp siefp;
 	union hv_synic_sint shared_sint;
 	union hv_synic_scontrol sctrl;
+	u64 vp_index;
 
 	u32 irq_vector = *((u32 *)(irqarg));
 	int cpu = smp_processor_id();
@@ -307,6 +290,15 @@
 	/* Check the version */
 	rdmsrl(HV_X64_MSR_SVERSION, version);
 
+	hv_context.event_dpc[cpu] = kmalloc(sizeof(struct tasklet_struct),
+					    GFP_ATOMIC);
+	if (hv_context.event_dpc[cpu] == NULL) {
+		pr_err("Unable to allocate event dpc\n");
+		goto cleanup;
+	}
+	tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
 	hv_context.synic_message_page[cpu] =
 		(void *)get_zeroed_page(GFP_ATOMIC);
 
@@ -345,7 +337,7 @@
 	shared_sint.as_uint64 = 0;
 	shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
 	shared_sint.masked = false;
-	shared_sint.auto_eoi = false;
+	shared_sint.auto_eoi = true;
 
 	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
@@ -356,6 +348,14 @@
 	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 
 	hv_context.synic_initialized = true;
+
+	/*
+	 * Setup the mapping between Hyper-V's notion
+	 * of cpuid and Linux' notion of cpuid.
+	 * This array will be indexed using Linux cpuid.
+	 */
+	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+	hv_context.vp_index[cpu] = (u32)vp_index;
 	return;
 
 cleanup:
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index dd289fd..3787321 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -29,7 +29,6 @@
 #include <linux/memory_hotplug.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
-#include <linux/mman.h>
 #include <linux/percpu_counter.h>
 
 #include <linux/hyperv.h>
@@ -415,10 +414,17 @@
 
 static bool hot_add;
 static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 30;
 
 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
 
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
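For reference, because pressure_report_delay is declared writable (S_IRUGO | S_IWUSR), it can be set at load time, for example with "modprobe hv_balloon pressure_report_delay=60" (assuming the module is built as hv_balloon), and, assuming the standard sysfs layout for module parameters, adjusted afterwards through /sys/module/hv_balloon/parameters/pressure_report_delay.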
 static atomic_t trans_id = ATOMIC_INIT(0);
 
 static int dm_ring_size = (5 * PAGE_SIZE);
@@ -517,6 +523,34 @@
 	}
 }
 
+unsigned long compute_balloon_floor(void)
+{
+	unsigned long min_pages;
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+	/* Simple continuous piecewise linear function:
+	 *  max MiB -> min MiB  gradient
+	 *       0         0
+	 *      16        16
+	 *      32        24
+	 *     128        72    (1/2)
+	 *     512       168    (1/4)
+	 *    2048       360    (1/8)
+	 *    8192       552    (1/32)
+	 *   32768      1320
+	 *  131072      4392
+	 */
+	if (totalram_pages < MB2PAGES(128))
+		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
+	else if (totalram_pages < MB2PAGES(512))
+		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
+	else if (totalram_pages < MB2PAGES(2048))
+		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+	else
+		min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+#undef MB2PAGES
+	return min_pages;
+}
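As a worked example of the floor computation above: a guest with 1024 MiB of RAM falls into the 512-2048 MiB bracket, so the floor is MB2PAGES(104) + MB2PAGES(1024)/8 = MB2PAGES(232), i.e. 232 MiB, which sits between the 168 MiB (for 512 MiB) and 360 MiB (for 2048 MiB) rows of the table.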
+
 /*
  * Post our status as it relates memory pressure to the
  * host. Host expects the guests to post this status
@@ -530,15 +564,30 @@
 static void post_status(struct hv_dynmem_device *dm)
 {
 	struct dm_status status;
+	struct sysinfo val;
 
-
+	if (pressure_report_delay > 0) {
+		--pressure_report_delay;
+		return;
+	}
+	si_meminfo(&val);
 	memset(&status, 0, sizeof(struct dm_status));
 	status.hdr.type = DM_STATUS_REPORT;
 	status.hdr.size = sizeof(struct dm_status);
 	status.hdr.trans_id = atomic_inc_return(&trans_id);
 
-
-	status.num_committed = vm_memory_committed();
+	/*
+	 * The host expects the guest to report free memory.
+	 * Further, the host expects the pressure information to
+	 * include the ballooned out pages.
+	 * For a given amount of memory that we are managing, we
+	 * need to compute a floor below which we should not balloon.
+	 * Compute this and add it to the pressure report.
+	 */
+	status.num_avail = val.freeram;
+	status.num_committed = vm_memory_committed() +
+				dm->num_pages_ballooned +
+				compute_balloon_floor();
 
 	vmbus_sendpacket(dm->dev->channel, &status,
 				sizeof(struct dm_status),
@@ -547,8 +596,6 @@
 
 }
 
-
-
 static void free_balloon_pages(struct hv_dynmem_device *dm,
 			 union dm_mem_page_range *range_array)
 {
@@ -1013,9 +1060,7 @@
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Dynamic Memory Class ID */
 	/* 525074DC-8985-46e2-8057-A307DC18A502 */
-	{ VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
-		       0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
-	},
+	{ HV_DM_GUID, },
 	{ },
 };
 
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a0667de..1d4cbd8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -49,6 +49,16 @@
 	.util_deinit = hv_kvp_deinit,
 };
 
+static void perform_shutdown(struct work_struct *dummy)
+{
+	orderly_poweroff(true);
+}
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
 static void shutdown_onchannelcallback(void *context)
 {
 	struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@
 	}
 
 	if (execute_shutdown == true)
-		orderly_poweroff(true);
+		schedule_work(&shutdown_work);
 }
 
 /*
@@ -274,6 +284,16 @@
 		}
 	}
 
+	/*
+	 * The set of services managed by the util driver are not performance
+	 * critical and do not need batched reading. Furthermore, some services
+	 * such as KVP can only handle one message from the host at a time.
+	 * Turn off batched reading for all util drivers before we open the
+	 * channel.
+	 */
+
+	set_channel_read_state(dev->channel, false);
+
 	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
 			srv->util_cb, dev->channel);
 	if (ret)
@@ -304,21 +324,21 @@
 
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Shutdown guid */
-	{ VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
-		       0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
-	  .driver_data = (unsigned long)&util_shutdown },
+	{ HV_SHUTDOWN_GUID,
+	  .driver_data = (unsigned long)&util_shutdown
+	},
 	/* Time synch guid */
-	{ VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
-		       0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
-	  .driver_data = (unsigned long)&util_timesynch },
+	{ HV_TS_GUID,
+	  .driver_data = (unsigned long)&util_timesynch
+	},
 	/* Heartbeat guid */
-	{ VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
-		       0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
-	  .driver_data = (unsigned long)&util_heartbeat },
+	{ HV_HEART_BEAT_GUID,
+	  .driver_data = (unsigned long)&util_heartbeat
+	},
 	/* KVP guid */
-	{ VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
-		       0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3,  0xe6)
-	  .driver_data = (unsigned long)&util_kvp },
+	{ HV_KVP_GUID,
+	  .driver_data = (unsigned long)&util_kvp
+	},
 	{ },
 };
 
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d8d1fad..12f2f9e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -101,15 +101,6 @@
 /* Define invalid partition identifier. */
 #define HV_PARTITION_ID_INVALID		((u64)0x0)
 
-/* Define connection identifier type. */
-union hv_connection_id {
-	u32 asu32;
-	struct {
-		u32 id:24;
-		u32 reserved:8;
-	} u;
-};
-
 /* Define port identifier type. */
 union hv_port_id {
 	u32 asu32;
@@ -338,13 +329,6 @@
 	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 };
 
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
-	union hv_connection_id connectionid;
-	u16 flag_number;
-	u16 rsvdz;
-};
-
 /*
  * Versioning definitions used for guests reporting themselves to the
 * hypervisor, and vice versa.
@@ -498,11 +482,6 @@
 
 
 
-struct hv_input_signal_event_buffer {
-	u64 align8;
-	struct hv_input_signal_event event;
-};
-
 struct hv_context {
 	/* We only support running on top of Hyper-V
 	* So at this point this really can only contain the Hyper-V ID
@@ -513,16 +492,24 @@
 
 	bool synic_initialized;
 
-	/*
-	 * This is used as an input param to HvCallSignalEvent hypercall. The
-	 * input param is immutable in our usage and must be dynamic mem (vs
-	 * stack or global). */
-	struct hv_input_signal_event_buffer *signal_event_buffer;
-	/* 8-bytes aligned of the buffer above */
-	struct hv_input_signal_event *signal_event_param;
-
 	void *synic_message_page[NR_CPUS];
 	void *synic_event_page[NR_CPUS];
+	/*
+	 * Hypervisor's notion of virtual processor ID is different from
+	 * Linux's notion of CPU ID. This information can only be retrieved
+	 * in the context of the calling CPU. Set up a map for easy access
+	 * to this information:
+	 *
+	 * vp_index[a] is Hyper-V's processor ID corresponding to
+	 * Linux cpuid 'a'.
+	 */
+	u32 vp_index[NR_CPUS];
+	/*
+	 * Starting with win8, we can take channel interrupts on any CPU;
+	 * we will manage the tasklet that handles events on a per-CPU
+	 * basis.
+	 */
+	struct tasklet_struct *event_dpc[NR_CPUS];
 };
 
 extern struct hv_context hv_context;
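A minimal sketch of how the vp_index map is meant to be consumed (the accessor below is hypothetical and not part of this patch; callers such as the channel code would translate the Linux CPU ID before telling the host which virtual processor should receive a channel's interrupts):

	static inline u32 hv_cpu_to_vp(int cpu)
	{
		/* Linux CPU ID -> Hyper-V virtual processor ID */
		return hv_context.vp_index[cpu];
	}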
@@ -538,12 +525,19 @@
 			 enum hv_message_type message_type,
 			 void *payload, size_t payload_size);
 
-extern u16 hv_signal_event(void);
+extern u16 hv_signal_event(void *con_id);
 
 extern void hv_synic_init(void *irqarg);
 
 extern void hv_synic_cleanup(void *arg);
 
+/*
+ * Host version information.
+ */
+extern unsigned int host_info_eax;
+extern unsigned int host_info_ebx;
+extern unsigned int host_info_ecx;
+extern unsigned int host_info_edx;
 
 /* Interface */
 
@@ -555,7 +549,7 @@
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
 		    struct scatterlist *sglist,
-		    u32 sgcount);
+		    u32 sgcount, bool *signal);
 
 int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
 		   u32 buflen);
@@ -563,13 +557,16 @@
 int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
 		   void *buffer,
 		   u32 buflen,
-		   u32 offset);
+		   u32 offset, bool *signal);
 
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
 
 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 			    struct hv_ring_buffer_debug_info *debug_info);
 
+void hv_begin_read(struct hv_ring_buffer_info *rbi);
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+
 /*
  * Maximum channels is determined by the size of the interrupt page
  * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -657,7 +654,7 @@
 
 int vmbus_post_msg(void *buffer, size_t buflen);
 
-int vmbus_set_event(u32 child_relid);
+int vmbus_set_event(struct vmbus_channel *channel);
 
 void vmbus_on_event(unsigned long data);
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 7233c88..cafa72f 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,105 @@
 
 #include "hyperv_vmbus.h"
 
+void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+	rbi->ring_buffer->interrupt_mask = 1;
+	smp_mb();
+}
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+	u32 read;
+	u32 write;
+
+	rbi->ring_buffer->interrupt_mask = 0;
+	smp_mb();
+
+	/*
+	 * Now check to see if the ring buffer is still empty.
+	 * If it is not, we raced and we need to process new
+	 * incoming messages.
+	 */
+	hv_get_ringbuffer_availbytes(rbi, &read, &write);
+
+	return read;
+}
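A minimal sketch of the intended read-side usage of the two helpers above (drain_ring() is a hypothetical placeholder for the real packet-processing loop, which lives in the channel code rather than in this file): mask host signalling, drain, then unmask and re-drain while hv_end_read() reports that data slipped in during the race window.

	hv_begin_read(rbi);		/* suppress host signalling while draining */
	do {
		drain_ring(rbi);	/* hypothetical: read and dispatch packets */
	} while (hv_end_read(rbi));	/* unmask; non-zero means we raced, drain again */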
+
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ *	1. The host guarantees that while it is draining the
+ *	   ring buffer, it will set the interrupt_mask to
+ *	   indicate it does not need to be interrupted when
+ *	   new data is placed.
+ *
+ *	2. The host guarantees that it will completely drain
+ *	   the ring buffer before exiting the read loop. Further,
+ *	   once the ring buffer is empty, it will clear the
+ *	   interrupt_mask and re-check to see if new data has
+ *	   arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+	if (rbi->ring_buffer->interrupt_mask)
+		return false;
+
+	/*
+	 * The only case in which we need to signal is when the
+	 * ring transitions from being empty to non-empty.
+	 */
+	if (old_write == rbi->ring_buffer->read_index)
+		return true;
+
+	return false;
+}
+
+/*
+ * To optimize flow management on the send side:
+ * when the sender is blocked because there is not
+ * enough space in the ring buffer, the consumer of
+ * the ring buffer can potentially signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ *    producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz is set to indicate whether
+ *    the consumer of the ring will signal when the ring
+ *    state transitions from being full to a state where
+ *    there is room for the producer to send the pending packet.
+ */
+
+static bool hv_need_to_signal_on_read(u32 old_rd,
+					 struct hv_ring_buffer_info *rbi)
+{
+	u32 prev_write_sz;
+	u32 cur_write_sz;
+	u32 r_size;
+	u32 write_loc = rbi->ring_buffer->write_index;
+	u32 read_loc = rbi->ring_buffer->read_index;
+	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+
+	/*
+	 * If the other end is not blocked on write don't bother.
+	 */
+	if (pending_sz == 0)
+		return false;
+
+	r_size = rbi->ring_datasize;
+	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+			read_loc - write_loc;
+
+	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+			old_rd - write_loc;
+
+
+	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+		return true;
+
+	return false;
+}
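As a worked example of the check above (numbers chosen only for illustration): suppose the host has set pending_send_sz to 4096 bytes because it is blocked. If, before this read, the producer only had 2048 bytes of write room (prev_write_sz) and the read just performed frees enough that cur_write_sz is now 6144 bytes, then (prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz) holds and the guest signals the host. If the room merely grew from 5120 to 6144 bytes, the producer was never blocked on this threshold and no signal is sent.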
 
 /*
  * hv_get_next_write_location()
@@ -239,19 +338,6 @@
 	}
 }
 
-
-/*
- *
- * hv_get_ringbuffer_interrupt_mask()
- *
- * Get the interrupt mask for the specified ring buffer
- *
- */
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
-{
-	return rbi->ring_buffer->interrupt_mask;
-}
-
 /*
  *
  * hv_ringbuffer_init()
@@ -298,7 +384,7 @@
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct scatterlist *sglist, u32 sgcount)
+		    struct scatterlist *sglist, u32 sgcount, bool *signal)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -307,6 +393,7 @@
 
 	struct scatterlist *sg;
 	u32 next_write_location;
+	u32 old_write;
 	u64 prev_indices = 0;
 	unsigned long flags;
 
@@ -335,6 +422,8 @@
 	/* Write to the ring buffer */
 	next_write_location = hv_get_next_write_location(outring_info);
 
+	old_write = next_write_location;
+
 	for_each_sg(sglist, sg, sgcount, i)
 	{
 		next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -351,14 +440,16 @@
 					     &prev_indices,
 					     sizeof(u64));
 
-	/* Make sure we flush all writes before updating the writeIndex */
-	smp_wmb();
+	/* Issue a full memory barrier before updating the write index */
+	smp_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
 	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
 }
 
@@ -414,13 +505,14 @@
  *
  */
 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
-		   u32 buflen, u32 offset)
+		   u32 buflen, u32 offset, bool *signal)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
 	unsigned long flags;
+	u32 old_read;
 
 	if (buflen <= 0)
 		return -EINVAL;
@@ -431,6 +523,8 @@
 				&bytes_avail_toread,
 				&bytes_avail_towrite);
 
+	old_read = bytes_avail_toread;
+
 	/* Make sure there is something to read */
 	if (bytes_avail_toread < buflen) {
 		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -461,5 +555,7 @@
 
 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
+	*signal = hv_need_to_signal_on_read(old_read, inring_info);
+
 	return 0;
 }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e1a9ec..cf19dfa 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
 #include <acpi/acpi_bus.h>
 #include <linux/completion.h>
 #include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
 #include <asm/hyperv.h>
 #include <asm/hypervisor.h>
 #include "hyperv_vmbus.h"
@@ -41,7 +42,6 @@
 static struct acpi_device  *hv_acpi_dev;
 
 static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
 static struct completion probe_event;
 static int irq;
 
@@ -454,21 +454,40 @@
 	union hv_synic_event_flags *event;
 	bool handled = false;
 
+	page_addr = hv_context.synic_event_page[cpu];
+	if (page_addr == NULL)
+		return IRQ_NONE;
+
+	event = (union hv_synic_event_flags *)page_addr +
+					 VMBUS_MESSAGE_SINT;
 	/*
 	 * Check for events before checking for messages. This is the order
 	 * in which events and messages are checked in Windows guests on
 	 * Hyper-V, and the Windows team suggested we do the same.
 	 */
 
-	page_addr = hv_context.synic_event_page[cpu];
-	event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+	if ((vmbus_proto_version == VERSION_WS2008) ||
+		(vmbus_proto_version == VERSION_WIN7)) {
 
-	/* Since we are a child, we only need to check bit 0 */
-	if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+		/* Since we are a child, we only need to check bit 0 */
+		if (sync_test_and_clear_bit(0,
+			(unsigned long *) &event->flags32[0])) {
+			handled = true;
+		}
+	} else {
+		/*
+		 * Our host is win8 or above. The signaling mechanism
+		 * has changed and we can directly look at the event page.
+		 * If bit n is set then we have an interrupt on the channel
+		 * whose id is n.
+		 */
 		handled = true;
-		tasklet_schedule(&event_dpc);
 	}
 
+	if (handled)
+		tasklet_schedule(hv_context.event_dpc[cpu]);
+
 	page_addr = hv_context.synic_message_page[cpu];
 	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 
@@ -485,6 +504,19 @@
 }
 
 /*
+ * vmbus interrupt flow handler:
+ * vmbus interrupts can concurrently occur on multiple CPUs and
+ * can be handled concurrently.
+ */
+
+static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
+{
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	desc->action->handler(irq, desc->action->dev_id);
+}
+
+/*
  * vmbus_bus_init -Main vmbus driver initialization routine.
  *
  * Here, we
@@ -506,7 +538,6 @@
 	}
 
 	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
-	tasklet_init(&event_dpc, vmbus_on_event, 0);
 
 	ret = bus_register(&hv_bus);
 	if (ret)
@@ -520,6 +551,13 @@
 		goto err_unregister;
 	}
 
+	/*
+	 * Vmbus interrupts can be handled concurrently on
+	 * different CPUs. Establish an appropriate interrupt flow
+	 * handler that can support this model.
+	 */
+	irq_set_handler(irq, vmbus_flow_handler);
+
 	vector = IRQ0_VECTOR + irq;
 
 	/*
@@ -575,8 +613,6 @@
 
 	ret = driver_register(&hv_driver->driver);
 
-	vmbus_request_offers();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__vmbus_driver_register);
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index ab20a08..141094e 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -20,7 +20,6 @@
 #include <linux/serial.h>
 #include <linux/tty_flip.h>
 #include <linux/slab.h>
-#include <linux/atomic.h>
 #include <linux/io.h>
 #include <linux/ipack.h>
 #include "ipoctal.h"
@@ -38,21 +37,19 @@
 	spinlock_t			lock;
 	unsigned int			pointer_read;
 	unsigned int			pointer_write;
-	atomic_t			open;
 	struct tty_port			tty_port;
 	union scc2698_channel __iomem	*regs;
 	union scc2698_block __iomem	*block_regs;
 	unsigned int			board_id;
-	unsigned char			*board_write;
 	u8				isr_rx_rdy_mask;
 	u8				isr_tx_rdy_mask;
+	unsigned int			rx_enable;
 };
 
 struct ipoctal {
 	struct ipack_device		*dev;
 	unsigned int			board_id;
 	struct ipoctal_channel		channel[NR_CHANNELS];
-	unsigned char			write;
 	struct tty_driver		*tty_drv;
 	u8 __iomem			*mem8_space;
 	u8 __iomem			*int_space;
@@ -64,28 +61,23 @@
 
 	channel = dev_get_drvdata(tty->dev);
 
+	/*
+	 * Enable RX. TX will be enabled when
+	 * there is something to send
+	 */
 	iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+	channel->rx_enable = 1;
 	return 0;
 }
 
 static int ipoctal_open(struct tty_struct *tty, struct file *file)
 {
-	int res;
 	struct ipoctal_channel *channel;
 
 	channel = dev_get_drvdata(tty->dev);
-
-	if (atomic_read(&channel->open))
-		return -EBUSY;
-
 	tty->driver_data = channel;
 
-	res = tty_port_open(&channel->tty_port, tty, file);
-	if (res)
-		return res;
-
-	atomic_inc(&channel->open);
-	return 0;
+	return tty_port_open(&channel->tty_port, tty, file);
 }
 
 static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -111,9 +103,7 @@
 	struct ipoctal_channel *channel = tty->driver_data;
 
 	tty_port_close(&channel->tty_port, tty, filp);
-
-	if (atomic_dec_and_test(&channel->open))
-		ipoctal_free_channel(channel);
+	ipoctal_free_channel(channel);
 }
 
 static int ipoctal_get_icount(struct tty_struct *tty,
@@ -137,11 +127,12 @@
 {
 	struct tty_port *port = &channel->tty_port;
 	unsigned char value;
-	unsigned char flag = TTY_NORMAL;
+	unsigned char flag;
 	u8 isr;
 
 	do {
 		value = ioread8(&channel->regs->r.rhr);
+		flag = TTY_NORMAL;
 		/* Error: count statistics */
 		if (sr & SR_ERROR) {
 			iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -183,10 +174,8 @@
 	unsigned char value;
 	unsigned int *pointer_write = &channel->pointer_write;
 
-	if (channel->nb_bytes <= 0) {
-		channel->nb_bytes = 0;
+	if (channel->nb_bytes == 0)
 		return;
-	}
 
 	value = channel->tty_port.xmit_buf[*pointer_write];
 	iowrite8(value, &channel->regs->w.thr);
@@ -194,39 +183,27 @@
 	(*pointer_write)++;
 	*pointer_write = *pointer_write % PAGE_SIZE;
 	channel->nb_bytes--;
-
-	if ((channel->nb_bytes == 0) &&
-	    (waitqueue_active(&channel->queue))) {
-
-		if (channel->board_id != IPACK1_DEVICE_ID_SBS_OCTAL_485) {
-			*channel->board_write = 1;
-			wake_up_interruptible(&channel->queue);
-		}
-	}
 }
 
 static void ipoctal_irq_channel(struct ipoctal_channel *channel)
 {
 	u8 isr, sr;
 
-	/* If there is no client, skip the check */
-	if (!atomic_read(&channel->open))
-		return;
-
+	spin_lock(&channel->lock);
 	/* The HW is organized in pair of channels.  See which register we need
 	 * to read from */
 	isr = ioread8(&channel->block_regs->r.isr);
 	sr = ioread8(&channel->regs->r.sr);
 
-	/* In case of RS-485, change from TX to RX when finishing TX.
-	 * Half-duplex. */
-	if ((channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) &&
-	    (sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
+	if ((sr & SR_TX_EMPTY) && (channel->nb_bytes == 0)) {
 		iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
-		iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
-		iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
-		*channel->board_write = 1;
-		wake_up_interruptible(&channel->queue);
+		/* In case of RS-485, change from TX to RX when finishing TX.
+		 * Half-duplex. */
+		if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
+			iowrite8(CR_CMD_NEGATE_RTSN, &channel->regs->w.cr);
+			iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+			channel->rx_enable = 1;
+		}
 	}
 
 	/* RX data */
@@ -237,7 +214,7 @@
 	if ((isr & channel->isr_tx_rdy_mask) && (sr & SR_TX_READY))
 		ipoctal_irq_tx(channel);
 
-	tty_flip_buffer_push(&channel->tty_port);
+	spin_unlock(&channel->lock);
 }
 
 static irqreturn_t ipoctal_irq_handler(void *arg)
@@ -245,14 +222,14 @@
 	unsigned int i;
 	struct ipoctal *ipoctal = (struct ipoctal *) arg;
 
-	/* Check all channels */
-	for (i = 0; i < NR_CHANNELS; i++)
-		ipoctal_irq_channel(&ipoctal->channel[i]);
-
 	/* Clear the IPack device interrupt */
 	readw(ipoctal->int_space + ACK_INT_REQ0);
 	readw(ipoctal->int_space + ACK_INT_REQ1);
 
+	/* Check all channels */
+	for (i = 0; i < NR_CHANNELS; i++)
+		ipoctal_irq_channel(&ipoctal->channel[i]);
+
 	return IRQ_HANDLED;
 }
 
@@ -306,7 +283,7 @@
 	ipoctal->mem8_space =
 		devm_ioremap_nocache(&ipoctal->dev->dev,
 				     region->start, 0x8000);
-	if (!addr) {
+	if (!ipoctal->mem8_space) {
 		dev_err(&ipoctal->dev->dev,
 			"Unable to map slot [%d:%d] MEM8 space!\n",
 			bus_nr, slot);
@@ -319,7 +296,6 @@
 		struct ipoctal_channel *channel = &ipoctal->channel[i];
 		channel->regs = chan_regs + i;
 		channel->block_regs = block_regs + (i >> 1);
-		channel->board_write = &ipoctal->write;
 		channel->board_id = ipoctal->board_id;
 		if (i & 1) {
 			channel->isr_tx_rdy_mask = ISR_TxRDY_B;
@@ -330,6 +306,7 @@
 		}
 
 		iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+		channel->rx_enable = 0;
 		iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
 		iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
 		iowrite8(MR1_CHRL_8_BITS | MR1_ERROR_CHAR | MR1_RxINT_RxRDY,
@@ -402,8 +379,6 @@
 
 		ipoctal_reset_stats(&channel->stats);
 		channel->nb_bytes = 0;
-		init_waitqueue_head(&channel->queue);
-
 		spin_lock_init(&channel->lock);
 		channel->pointer_read = 0;
 		channel->pointer_write = 0;
@@ -414,12 +389,6 @@
 			continue;
 		}
 		dev_set_drvdata(tty_dev, channel);
-
-		/*
-		 * Enable again the RX. TX will be enabled when
-		 * there is something to send
-		 */
-		iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
 	}
 
 	return 0;
@@ -459,6 +428,7 @@
 	/* As the IP-OCTAL 485 only supports half duplex, do it manually */
 	if (channel->board_id == IPACK1_DEVICE_ID_SBS_OCTAL_485) {
 		iowrite8(CR_DISABLE_RX, &channel->regs->w.cr);
+		channel->rx_enable = 0;
 		iowrite8(CR_CMD_ASSERT_RTSN, &channel->regs->w.cr);
 	}
 
@@ -467,10 +437,6 @@
 	 * operations
 	 */
 	iowrite8(CR_ENABLE_TX, &channel->regs->w.cr);
-	wait_event_interruptible(channel->queue, *channel->board_write);
-	iowrite8(CR_DISABLE_TX, &channel->regs->w.cr);
-
-	*channel->board_write = 0;
 	return char_copied;
 }
 
@@ -622,8 +588,9 @@
 	iowrite8(mr2, &channel->regs->w.mr);
 	iowrite8(csr, &channel->regs->w.csr);
 
-	/* Enable again the RX */
-	iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
+	/* Enable RX again, if it was enabled before */
+	if (channel->rx_enable)
+		iowrite8(CR_ENABLE_RX, &channel->regs->w.cr);
 }
 
 static void ipoctal_hangup(struct tty_struct *tty)
@@ -643,6 +610,7 @@
 	tty_port_hangup(&channel->tty_port);
 
 	iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+	channel->rx_enable = 0;
 	iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
 	iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
 	iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
@@ -652,6 +620,22 @@
 	wake_up_interruptible(&channel->tty_port.open_wait);
 }
 
+static void ipoctal_shutdown(struct tty_struct *tty)
+{
+	struct ipoctal_channel *channel = tty->driver_data;
+
+	if (channel == NULL)
+		return;
+
+	iowrite8(CR_DISABLE_RX | CR_DISABLE_TX, &channel->regs->w.cr);
+	channel->rx_enable = 0;
+	iowrite8(CR_CMD_RESET_RX, &channel->regs->w.cr);
+	iowrite8(CR_CMD_RESET_TX, &channel->regs->w.cr);
+	iowrite8(CR_CMD_RESET_ERR_STATUS, &channel->regs->w.cr);
+	iowrite8(CR_CMD_RESET_MR, &channel->regs->w.cr);
+	clear_bit(ASYNCB_INITIALIZED, &channel->tty_port.flags);
+}
+
 static const struct tty_operations ipoctal_fops = {
 	.ioctl =		NULL,
 	.open =			ipoctal_open,
@@ -662,6 +646,7 @@
 	.chars_in_buffer =	ipoctal_chars_in_buffer,
 	.get_icount =		ipoctal_get_icount,
 	.hangup =		ipoctal_hangup,
+	.shutdown =		ipoctal_shutdown,
 };
 
 static int ipoctal_probe(struct ipack_device *dev)
diff --git a/drivers/mfd/wm5102-tables.c b/drivers/mfd/wm5102-tables.c
index f6fcb87b..a9d9d41 100644
--- a/drivers/mfd/wm5102-tables.c
+++ b/drivers/mfd/wm5102-tables.c
@@ -84,6 +84,12 @@
 }
 
 static const struct regmap_irq wm5102_aod_irqs[ARIZONA_NUM_IRQ] = {
+	[ARIZONA_IRQ_MICD_CLAMP_FALL] = {
+		.mask = ARIZONA_MICD_CLAMP_FALL_EINT1
+	},
+	[ARIZONA_IRQ_MICD_CLAMP_RISE] = {
+		.mask = ARIZONA_MICD_CLAMP_RISE_EINT1
+	},
 	[ARIZONA_IRQ_GP5_FALL] = { .mask = ARIZONA_GP5_FALL_EINT1 },
 	[ARIZONA_IRQ_GP5_RISE] = { .mask = ARIZONA_GP5_RISE_EINT1 },
 	[ARIZONA_IRQ_JD_FALL] = { .mask = ARIZONA_JD1_FALL_EINT1 },
@@ -313,6 +319,7 @@
 	{ 0x0000021A, 0x01A6 },   /* R538   - Mic Bias Ctrl 3 */ 
 	{ 0x00000293, 0x0000 },   /* R659   - Accessory Detect Mode 1 */ 
 	{ 0x0000029B, 0x0020 },   /* R667   - Headphone Detect 1 */ 
+	{ 0x000002A2, 0x0000 },   /* R674   - Micd clamp control */
 	{ 0x000002A3, 0x1102 },   /* R675   - Mic Detect 1 */ 
 	{ 0x000002A4, 0x009F },   /* R676   - Mic Detect 2 */ 
 	{ 0x000002A5, 0x0000 },   /* R677   - Mic Detect 3 */ 
@@ -1107,6 +1114,8 @@
 	case ARIZONA_ACCESSORY_DETECT_MODE_1:
 	case ARIZONA_HEADPHONE_DETECT_1:
 	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_HP_DACVAL:
+	case ARIZONA_MICD_CLAMP_CONTROL:
 	case ARIZONA_MIC_DETECT_1:
 	case ARIZONA_MIC_DETECT_2:
 	case ARIZONA_MIC_DETECT_3:
@@ -1876,6 +1885,7 @@
 	case ARIZONA_DSP1_STATUS_2:
 	case ARIZONA_DSP1_STATUS_3:
 	case ARIZONA_HEADPHONE_DETECT_2:
+	case ARIZONA_HP_DACVAL:
 	case ARIZONA_MIC_DETECT_3:
 		return true;
 	default:
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 668a582..e83fdfe 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -499,6 +499,17 @@
 	  stereo and mono audio, video, microphone and UART data to use
 	  a common connector port.
 
+config LATTICE_ECP3_CONFIG
+	tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
+	depends on SPI && SYSFS
+	select FW_LOADER
+	default	n
+	help
+	  This option enables support for bitstream configuration (programming
+	  or loading) of the Lattice ECP3 FPGA family via SPI.
+
+	  If unsure, say N.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -507,4 +518,5 @@
 source "drivers/misc/carma/Kconfig"
 source "drivers/misc/altera-stapl/Kconfig"
 source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vmw_vmci/Kconfig"
 endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 2129377..35a1463 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,3 +49,6 @@
 obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
 obj-$(CONFIG_ALTERA_STAPL)	+=altera-stapl/
 obj-$(CONFIG_INTEL_MEI)		+= mei/
+obj-$(CONFIG_MAX8997_MUIC)	+= max8997-muic.o
+obj-$(CONFIG_VMWARE_VMCI)	+= vmw_vmci/
+obj-$(CONFIG_LATTICE_ECP3_CONFIG)	+= lattice-ecp3-config.o
diff --git a/drivers/misc/cb710/Kconfig b/drivers/misc/cb710/Kconfig
index 22429b8..5acb9c5 100644
--- a/drivers/misc/cb710/Kconfig
+++ b/drivers/misc/cb710/Kconfig
@@ -1,6 +1,6 @@
 config CB710_CORE
 	tristate "ENE CB710/720 Flash memory card reader support"
-	depends on PCI
+	depends on PCI && GENERIC_HARDIRQS
 	help
 	  This option enables support for PCI ENE CB710/720 Flash memory card
 	  reader found in some laptops (ie. some versions of HP Compaq nx9500).
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
new file mode 100644
index 0000000..155700b
--- /dev/null
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2012 Stefan Roese <sr@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#define FIRMWARE_NAME	"lattice-ecp3.bit"
+
+/*
+ * The JTAG IDs of the supported FPGAs. The ID is 32 bits wide,
+ * reversed as noted in the manual.
+ */
+#define ID_ECP3_17	0xc2088080
+#define ID_ECP3_35	0xc2048080
+
+/* FPGA commands */
+#define FPGA_CMD_READ_ID	0x07	/* plus 24 bits */
+#define FPGA_CMD_READ_STATUS	0x09	/* plus 24 bits */
+#define FPGA_CMD_CLEAR		0x70
+#define FPGA_CMD_REFRESH	0x71
+#define FPGA_CMD_WRITE_EN	0x4a	/* plus 2 bits */
+#define FPGA_CMD_WRITE_DIS	0x4f	/* plus 8 bits */
+#define FPGA_CMD_WRITE_INC	0x41	/* plus 0 bits */
+
+/*
+ * The status register is 32 bits wide, reversed; DONE is bit 17 per TN1222.pdf
+ * (LatticeECP3 Slave SPI Port User's Guide)
+ */
+#define FPGA_STATUS_DONE	0x00004000
+#define FPGA_STATUS_CLEARED	0x00010000
+
+#define FPGA_CLEAR_TIMEOUT	5000	/* max. 5000ms for FPGA clear */
+#define FPGA_CLEAR_MSLEEP	10
+#define FPGA_CLEAR_LOOP_COUNT	(FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
+
+struct fpga_data {
+	struct completion fw_loaded;
+};
+
+struct ecp3_dev {
+	u32 jedec_id;
+	char *name;
+};
+
+static const struct ecp3_dev ecp3_dev[] = {
+	{
+		.jedec_id = ID_ECP3_17,
+		.name = "Lattice ECP3-17",
+	},
+	{
+		.jedec_id = ID_ECP3_35,
+		.name = "Lattice ECP3-35",
+	},
+};
+
+static void firmware_load(const struct firmware *fw, void *context)
+{
+	struct spi_device *spi = (struct spi_device *)context;
+	struct fpga_data *data = dev_get_drvdata(&spi->dev);
+	u8 *buffer;
+	int ret;
+	u8 txbuf[8];
+	u8 rxbuf[8];
+	int rx_len = 8;
+	int i;
+	u32 jedec_id;
+	u32 status;
+
+	if (fw->size == 0) {
+		dev_err(&spi->dev, "Error: Firmware size is 0!\n");
+		return;
+	}
+
+	/* Fill dummy data (24 stuffing bits for commands) */
+	txbuf[1] = 0x00;
+	txbuf[2] = 0x00;
+	txbuf[3] = 0x00;
+
+	/* Trying to speak with the FPGA via SPI... */
+	txbuf[0] = FPGA_CMD_READ_ID;
+	ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+	dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", *(u32 *)&rxbuf[4]);
+	jedec_id = *(u32 *)&rxbuf[4];
+
+	for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
+		if (jedec_id == ecp3_dev[i].jedec_id)
+			break;
+	}
+	if (i == ARRAY_SIZE(ecp3_dev)) {
+		dev_err(&spi->dev,
+			"Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
+			jedec_id);
+		return;
+	}
+
+	dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
+
+	txbuf[0] = FPGA_CMD_READ_STATUS;
+	ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+	dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+
+	buffer = kzalloc(fw->size + 8, GFP_KERNEL);
+	if (!buffer) {
+		dev_err(&spi->dev, "Error: Can't allocate memory!\n");
+		return;
+	}
+
+	/*
+	 * Insert WRITE_INC command into stream (one SPI frame)
+	 */
+	buffer[0] = FPGA_CMD_WRITE_INC;
+	buffer[1] = 0xff;
+	buffer[2] = 0xff;
+	buffer[3] = 0xff;
+	memcpy(buffer + 4, fw->data, fw->size);
+
+	txbuf[0] = FPGA_CMD_REFRESH;
+	ret = spi_write(spi, txbuf, 4);
+
+	txbuf[0] = FPGA_CMD_WRITE_EN;
+	ret = spi_write(spi, txbuf, 4);
+
+	txbuf[0] = FPGA_CMD_CLEAR;
+	ret = spi_write(spi, txbuf, 4);
+
+	/*
+	 * Wait for FPGA memory to become cleared
+	 */
+	for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
+		txbuf[0] = FPGA_CMD_READ_STATUS;
+		ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+		status = *(u32 *)&rxbuf[4];
+		if (status == FPGA_STATUS_CLEARED)
+			break;
+
+		msleep(FPGA_CLEAR_MSLEEP);
+	}
+
+	if (i == FPGA_CLEAR_LOOP_COUNT) {
+		dev_err(&spi->dev,
+			"Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
+			status);
+		kfree(buffer);
+		return;
+	}
+
+	dev_info(&spi->dev, "Configuring the FPGA...\n");
+	ret = spi_write(spi, buffer, fw->size + 8);
+
+	txbuf[0] = FPGA_CMD_WRITE_DIS;
+	ret = spi_write(spi, txbuf, 4);
+
+	txbuf[0] = FPGA_CMD_READ_STATUS;
+	ret = spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
+	dev_dbg(&spi->dev, "FPGA Status=%08x\n", *(u32 *)&rxbuf[4]);
+	status = *(u32 *)&rxbuf[4];
+
+	/* Check result */
+	if (status & FPGA_STATUS_DONE)
+		dev_info(&spi->dev, "FPGA successfully configured!\n");
+	else
+		dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
+
+	/*
+	 * Don't forget to release the firmware again
+	 */
+	release_firmware(fw);
+
+	kfree(buffer);
+
+	complete(&data->fw_loaded);
+}
+
+static int lattice_ecp3_probe(struct spi_device *spi)
+{
+	struct fpga_data *data;
+	int err;
+
+	data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
+		return -ENOMEM;
+	}
+	spi_set_drvdata(spi, data);
+
+	init_completion(&data->fw_loaded);
+	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG,
+				      FIRMWARE_NAME, &spi->dev,
+				      GFP_KERNEL, spi, firmware_load);
+	if (err) {
+		dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
+		return err;
+	}
+
+	dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
+
+	return 0;
+}
+
+static int lattice_ecp3_remove(struct spi_device *spi)
+{
+	struct fpga_data *data = spi_get_drvdata(spi);
+
+	wait_for_completion(&data->fw_loaded);
+
+	return 0;
+}
+
+static const struct spi_device_id lattice_ecp3_id[] = {
+	{ "ecp3-17", 0 },
+	{ "ecp3-35", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
+
+static struct spi_driver lattice_ecp3_driver = {
+	.driver = {
+		.name = "lattice-ecp3",
+		.owner = THIS_MODULE,
+	},
+	.probe = lattice_ecp3_probe,
+	.remove = lattice_ecp3_remove,
+	.id_table = lattice_ecp3_id,
+};
+
+module_spi_driver(lattice_ecp3_driver);
+
+MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
+MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
+MODULE_LICENSE("GPL");
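For reference, a minimal sketch of how a board might wire this driver up (all values below are placeholders chosen for illustration; the driver binds by SPI modalias and fetches "lattice-ecp3.bit" from the firmware search path, typically /lib/firmware, at probe time):

	static struct spi_board_info ecp3_board_info __initdata = {
		.modalias	= "ecp3-35",	/* matches lattice_ecp3_id[] above */
		.max_speed_hz	= 10 * 1000 * 1000,
		.bus_num	= 0,
		.chip_select	= 0,
	};

	/* registered from board code, e.g.:
	 * spi_register_board_info(&ecp3_board_info, 1);
	 */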
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index 5a79ccd..d21b4d0 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -1,11 +1,22 @@
 config INTEL_MEI
-	tristate "Intel Management Engine Interface (Intel MEI)"
+	tristate "Intel Management Engine Interface"
 	depends on X86 && PCI && WATCHDOG_CORE
 	help
 	  The Intel Management Engine (Intel ME) provides Manageability,
 	  Security and Media services for systems containing Intel chipsets.
 	  If selected, the /dev/mei misc device will be created.
 
+	  For more information see
+	  <http://software.intel.com/en-us/manageability/>
+
+config INTEL_MEI_ME
+	bool "ME Enabled Intel Chipsets"
+	depends on INTEL_MEI
+	depends on X86 && PCI && WATCHDOG_CORE
+	default y
+	help
+	  MEI support for ME Enabled Intel chipsets.
+
 	  Supported Chipsets are:
 	  7 Series Chipset Family
 	  6 Series Chipset Family
@@ -24,5 +35,3 @@
 	  82Q33 Express
 	  82X38/X48 Express
 
-	  For more information see
-	  <http://software.intel.com/en-us/manageability/>
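With the split above, a typical configuration for Intel hosts would presumably be CONFIG_INTEL_MEI=m together with CONFIG_INTEL_MEI_ME=y (the default), which builds the PCI/ME back end (pci-me.o and hw-me.o) into the mei module as shown in the Makefile change below.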
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index 0017842..040af6c 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -4,9 +4,11 @@
 #
 obj-$(CONFIG_INTEL_MEI) += mei.o
 mei-objs := init.o
+mei-objs += hbm.o
 mei-objs += interrupt.o
-mei-objs += interface.o
-mei-objs += iorw.o
+mei-objs += client.o
 mei-objs += main.o
 mei-objs += amthif.o
 mei-objs += wd.o
+mei-$(CONFIG_INTEL_MEI_ME) += pci-me.o
+mei-$(CONFIG_INTEL_MEI_ME) += hw-me.o
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index e40ffd9..c86d7e3 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -31,15 +31,16 @@
 #include <linux/jiffies.h>
 #include <linux/uaccess.h>
 
+#include <linux/mei.h>
 
 #include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
 
-const uuid_le mei_amthi_guid  = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d, 0xac,
-						0xa8, 0x46, 0xe0, 0xff, 0x65,
-						0x81, 0x4c);
+const uuid_le mei_amthif_guid  = UUID_LE(0x12f80028, 0xb4b7, 0x4b2d,
+					 0xac, 0xa8, 0x46, 0xe0,
+					 0xff, 0x65, 0x81, 0x4c);
 
 /**
  * mei_amthif_reset_params - initializes mei device iamthif
@@ -64,22 +65,24 @@
  * @dev: the device structure
  *
  */
-void mei_amthif_host_init(struct mei_device *dev)
+int mei_amthif_host_init(struct mei_device *dev)
 {
-	int i;
+	struct mei_cl *cl = &dev->iamthif_cl;
 	unsigned char *msg_buf;
+	int ret, i;
 
-	mei_cl_init(&dev->iamthif_cl, dev);
-	dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
+	dev->iamthif_state = MEI_IAMTHIF_IDLE;
 
-	/* find ME amthi client */
-	i = mei_me_cl_link(dev, &dev->iamthif_cl,
-			    &mei_amthi_guid, MEI_IAMTHIF_HOST_CLIENT_ID);
+	mei_cl_init(cl, dev);
+
+	i = mei_me_cl_by_uuid(dev, &mei_amthif_guid);
 	if (i < 0) {
-		dev_info(&dev->pdev->dev, "failed to find iamthif client.\n");
-		return;
+		dev_info(&dev->pdev->dev, "amthif: failed to find the client\n");
+		return -ENOENT;
 	}
 
+	cl->me_client_id = dev->me_clients[i].client_id;
+
 	/* Assign iamthif_mtu to the value received from ME  */
 
 	dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
@@ -93,19 +96,29 @@
 	msg_buf = kcalloc(dev->iamthif_mtu,
 			sizeof(unsigned char), GFP_KERNEL);
 	if (!msg_buf) {
-		dev_dbg(&dev->pdev->dev, "memory allocation for ME message buffer failed.\n");
-		return;
+		dev_err(&dev->pdev->dev, "amthif: memory allocation for ME message buffer failed.\n");
+		return -ENOMEM;
 	}
 
 	dev->iamthif_msg_buf = msg_buf;
 
-	if (mei_connect(dev, &dev->iamthif_cl)) {
-		dev_dbg(&dev->pdev->dev, "Failed to connect to AMTHI client\n");
-		dev->iamthif_cl.state = MEI_FILE_DISCONNECTED;
-		dev->iamthif_cl.host_client_id = 0;
-	} else {
-		dev->iamthif_cl.timer_count = MEI_CONNECT_TIMEOUT;
+	ret = mei_cl_link(cl, MEI_IAMTHIF_HOST_CLIENT_ID);
+
+	if (ret < 0) {
+		dev_err(&dev->pdev->dev, "amthif: failed to link client\n");
+		return -ENOENT;
 	}
+
+	cl->state = MEI_FILE_CONNECTING;
+
+	if (mei_hbm_cl_connect_req(dev, cl)) {
+		dev_dbg(&dev->pdev->dev, "amthif: Failed to connect to ME client\n");
+		cl->state = MEI_FILE_DISCONNECTED;
+		cl->host_client_id = 0;
+	} else {
+		cl->timer_count = MEI_CONNECT_TIMEOUT;
+	}
+	return 0;
 }
 
 /**
@@ -168,10 +181,10 @@
 	i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
 
 	if (i < 0) {
-		dev_dbg(&dev->pdev->dev, "amthi client not found.\n");
+		dev_dbg(&dev->pdev->dev, "amthif client not found.\n");
 		return -ENODEV;
 	}
-	dev_dbg(&dev->pdev->dev, "checking amthi data\n");
+	dev_dbg(&dev->pdev->dev, "checking amthif data\n");
 	cb = mei_amthif_find_read_list_entry(dev, file);
 
 	/* Check for if we can block or not*/
@@ -179,7 +192,7 @@
 		return -EAGAIN;
 
 
-	dev_dbg(&dev->pdev->dev, "waiting for amthi data\n");
+	dev_dbg(&dev->pdev->dev, "waiting for amthif data\n");
 	while (cb == NULL) {
 		/* unlock the Mutex */
 		mutex_unlock(&dev->device_lock);
@@ -197,17 +210,17 @@
 	}
 
 
-	dev_dbg(&dev->pdev->dev, "Got amthi data\n");
+	dev_dbg(&dev->pdev->dev, "Got amthif data\n");
 	dev->iamthif_timer = 0;
 
 	if (cb) {
 		timeout = cb->read_time +
 			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);
-		dev_dbg(&dev->pdev->dev, "amthi timeout = %lud\n",
+		dev_dbg(&dev->pdev->dev, "amthif timeout = %lud\n",
 				timeout);
 
 		if  (time_after(jiffies, timeout)) {
-			dev_dbg(&dev->pdev->dev, "amthi Time out\n");
+			dev_dbg(&dev->pdev->dev, "amthif Time out\n");
 			/* 15 sec for the message has expired */
 			list_del(&cb->list);
 			rets = -ETIMEDOUT;
@@ -227,9 +240,9 @@
 		 * remove message from deletion list
 		 */
 
-	dev_dbg(&dev->pdev->dev, "amthi cb->response_buffer size - %d\n",
+	dev_dbg(&dev->pdev->dev, "amthif cb->response_buffer size - %d\n",
 	    cb->response_buffer.size);
-	dev_dbg(&dev->pdev->dev, "amthi cb->buf_idx - %lu\n", cb->buf_idx);
+	dev_dbg(&dev->pdev->dev, "amthif cb->buf_idx - %lu\n", cb->buf_idx);
 
 	/* length is being truncated to PAGE_SIZE, however,
 	 * the buf_idx may point beyond */
@@ -245,7 +258,7 @@
 		}
 	}
 free:
-	dev_dbg(&dev->pdev->dev, "free amthi cb memory.\n");
+	dev_dbg(&dev->pdev->dev, "free amthif cb memory.\n");
 	*offset = 0;
 	mei_io_cb_free(cb);
 out:
@@ -269,7 +282,7 @@
 	if (!dev || !cb)
 		return -ENODEV;
 
-	dev_dbg(&dev->pdev->dev, "write data to amthi client.\n");
+	dev_dbg(&dev->pdev->dev, "write data to amthif client.\n");
 
 	dev->iamthif_state = MEI_IAMTHIF_WRITING;
 	dev->iamthif_current_cb = cb;
@@ -280,15 +293,15 @@
 	memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
 	       cb->request_buffer.size);
 
-	ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
+	ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl);
 	if (ret < 0)
 		return ret;
 
-	if (ret && dev->mei_host_buffer_is_empty) {
+	if (ret && dev->hbuf_is_ready) {
 		ret = 0;
-		dev->mei_host_buffer_is_empty = false;
-		if (cb->request_buffer.size > mei_hbuf_max_data(dev)) {
-			mei_hdr.length = mei_hbuf_max_data(dev);
+		dev->hbuf_is_ready = false;
+		if (cb->request_buffer.size > mei_hbuf_max_len(dev)) {
+			mei_hdr.length = mei_hbuf_max_len(dev);
 			mei_hdr.msg_complete = 0;
 		} else {
 			mei_hdr.length = cb->request_buffer.size;
@@ -300,25 +313,24 @@
 		mei_hdr.reserved = 0;
 		dev->iamthif_msg_buf_index += mei_hdr.length;
 		if (mei_write_message(dev, &mei_hdr,
-					(unsigned char *)(dev->iamthif_msg_buf),
-					mei_hdr.length))
+					(unsigned char *)dev->iamthif_msg_buf))
 			return -ENODEV;
 
 		if (mei_hdr.msg_complete) {
-			if (mei_flow_ctrl_reduce(dev, &dev->iamthif_cl))
+			if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl))
 				return -ENODEV;
 			dev->iamthif_flow_control_pending = true;
 			dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
-			dev_dbg(&dev->pdev->dev, "add amthi cb to write waiting list\n");
+			dev_dbg(&dev->pdev->dev, "add amthif cb to write waiting list\n");
 			dev->iamthif_current_cb = cb;
 			dev->iamthif_file_object = cb->file_object;
 			list_add_tail(&cb->list, &dev->write_waiting_list.list);
 		} else {
-			dev_dbg(&dev->pdev->dev, "message does not complete, so add amthi cb to write list.\n");
+			dev_dbg(&dev->pdev->dev, "message does not complete, so add amthif cb to write list.\n");
 			list_add_tail(&cb->list, &dev->write_list.list);
 		}
 	} else {
-		if (!(dev->mei_host_buffer_is_empty))
+		if (!dev->hbuf_is_ready)
 			dev_dbg(&dev->pdev->dev, "host buffer is not empty");
 
 		dev_dbg(&dev->pdev->dev, "No flow control credentials, so add iamthif cb to write list.\n");
@@ -383,7 +395,7 @@
 	dev->iamthif_timer = 0;
 	dev->iamthif_file_object = NULL;
 
-	dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+	dev_dbg(&dev->pdev->dev, "complete amthif cmd_list cb.\n");
 
 	list_for_each_entry_safe(pos, next, &dev->amthif_cmd_list.list, list) {
 		list_del(&pos->list);
@@ -392,7 +404,7 @@
 			status = mei_amthif_send_cmd(dev, pos);
 			if (status) {
 				dev_dbg(&dev->pdev->dev,
-					"amthi write failed status = %d\n",
+					"amthif write failed status = %d\n",
 						status);
 				return;
 			}
@@ -412,7 +424,7 @@
 	if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
 		dev->iamthif_file_object == file) {
 		mask |= (POLLIN | POLLRDNORM);
-		dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
+		dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
 		mei_amthif_run_next_cmd(dev);
 	}
 	return mask;
@@ -434,54 +446,51 @@
 int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
 			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
 {
-	struct mei_msg_hdr *mei_hdr;
+	struct mei_msg_hdr mei_hdr;
 	struct mei_cl *cl = cb->cl;
 	size_t len = dev->iamthif_msg_buf_size - dev->iamthif_msg_buf_index;
 	size_t msg_slots = mei_data2slots(len);
 
-	mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
-	mei_hdr->host_addr = cl->host_client_id;
-	mei_hdr->me_addr = cl->me_client_id;
-	mei_hdr->reserved = 0;
+	mei_hdr.host_addr = cl->host_client_id;
+	mei_hdr.me_addr = cl->me_client_id;
+	mei_hdr.reserved = 0;
 
 	if (*slots >= msg_slots) {
-		mei_hdr->length = len;
-		mei_hdr->msg_complete = 1;
+		mei_hdr.length = len;
+		mei_hdr.msg_complete = 1;
 	/* Split the message only if we can write the whole host buffer */
 	} else if (*slots == dev->hbuf_depth) {
 		msg_slots = *slots;
 		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
-		mei_hdr->length = len;
-		mei_hdr->msg_complete = 0;
+		mei_hdr.length = len;
+		mei_hdr.msg_complete = 0;
 	} else {
 		/* wait for next time the host buffer is empty */
 		return 0;
 	}
 
-	dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
-			mei_hdr->length, mei_hdr->msg_complete);
+	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT,  MEI_HDR_PRM(&mei_hdr));
 
 	*slots -=  msg_slots;
-	if (mei_write_message(dev, mei_hdr,
-		dev->iamthif_msg_buf + dev->iamthif_msg_buf_index,
-		mei_hdr->length)) {
+	if (mei_write_message(dev, &mei_hdr,
+		dev->iamthif_msg_buf + dev->iamthif_msg_buf_index)) {
 			dev->iamthif_state = MEI_IAMTHIF_IDLE;
 			cl->status = -ENODEV;
 			list_del(&cb->list);
 			return -ENODEV;
 	}
 
-	if (mei_flow_ctrl_reduce(dev, cl))
+	if (mei_cl_flow_ctrl_reduce(cl))
 		return -ENODEV;
 
-	dev->iamthif_msg_buf_index += mei_hdr->length;
+	dev->iamthif_msg_buf_index += mei_hdr.length;
 	cl->status = 0;
 
-	if (mei_hdr->msg_complete) {
+	if (mei_hdr.msg_complete) {
 		dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
 		dev->iamthif_flow_control_pending = true;
 
-		/* save iamthif cb sent to amthi client */
+		/* save iamthif cb sent to amthif client */
 		cb->buf_idx = dev->iamthif_msg_buf_index;
 		dev->iamthif_current_cb = cb;
 
@@ -494,11 +503,11 @@
 
 /**
  * mei_amthif_irq_read_message - read routine after ISR to
- *			handle the read amthi message
+ *			handle the read amthif message
  *
  * @complete_list: An instance of our list structure
  * @dev: the device structure
- * @mei_hdr: header of amthi message
+ * @mei_hdr: header of amthif message
  *
  * returns 0 on success, <0 on failure.
  */
@@ -522,10 +531,10 @@
 		return 0;
 
 	dev_dbg(&dev->pdev->dev,
-			"amthi_message_buffer_index =%d\n",
+			"amthif_message_buffer_index =%d\n",
 			mei_hdr->length);
 
-	dev_dbg(&dev->pdev->dev, "completed amthi read.\n ");
+	dev_dbg(&dev->pdev->dev, "completed amthif read.\n ");
 	if (!dev->iamthif_current_cb)
 		return -ENODEV;
 
@@ -540,8 +549,8 @@
 	cb->read_time = jiffies;
 	if (dev->iamthif_ioctl && cb->cl == &dev->iamthif_cl) {
 		/* found the iamthif cb */
-		dev_dbg(&dev->pdev->dev, "complete the amthi read cb.\n ");
-		dev_dbg(&dev->pdev->dev, "add the amthi read cb to complete.\n ");
+		dev_dbg(&dev->pdev->dev, "complete the amthif read cb.\n ");
+		dev_dbg(&dev->pdev->dev, "add the amthif read cb to complete.\n ");
 		list_add_tail(&cb->list, &complete_list->list);
 	}
 	return 0;
@@ -563,7 +572,7 @@
 		return -EMSGSIZE;
 	}
 	*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
-	if (mei_send_flow_control(dev, &dev->iamthif_cl)) {
+	if (mei_hbm_cl_flow_control_req(dev, &dev->iamthif_cl)) {
 		dev_dbg(&dev->pdev->dev, "iamthif flow control failed\n");
 		return -EIO;
 	}
@@ -574,7 +583,7 @@
 	dev->iamthif_msg_buf_index = 0;
 	dev->iamthif_msg_buf_size = 0;
 	dev->iamthif_stall_timer = MEI_IAMTHIF_STALL_TIMER;
-	dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
+	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
 	return 0;
 }
 
@@ -593,7 +602,7 @@
 				dev->iamthif_msg_buf,
 				dev->iamthif_msg_buf_index);
 		list_add_tail(&cb->list, &dev->amthif_rd_complete_list.list);
-		dev_dbg(&dev->pdev->dev, "amthi read completed\n");
+		dev_dbg(&dev->pdev->dev, "amthif read completed\n");
 		dev->iamthif_timer = jiffies;
 		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
 				dev->iamthif_timer);
@@ -601,7 +610,7 @@
 		mei_amthif_run_next_cmd(dev);
 	}
 
-	dev_dbg(&dev->pdev->dev, "completing amthi call back.\n");
+	dev_dbg(&dev->pdev->dev, "completing amthif call back.\n");
 	wake_up_interruptible(&dev->iamthif_cl.wait);
 }
 
@@ -635,7 +644,8 @@
 			if (dev->iamthif_current_cb == cb_pos) {
 				dev->iamthif_current_cb = NULL;
 				/* send flow control to iamthif client */
-				mei_send_flow_control(dev, &dev->iamthif_cl);
+				mei_hbm_cl_flow_control_req(dev,
+							&dev->iamthif_cl);
 			}
 			/* free all allocated buffers */
 			mei_io_cb_free(cb_pos);
@@ -706,11 +716,11 @@
 	if (dev->iamthif_file_object == file &&
 	    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
 
-		dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
+		dev_dbg(&dev->pdev->dev, "amthif canceled iamthif state %d\n",
 		    dev->iamthif_state);
 		dev->iamthif_canceled = true;
 		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
-			dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
+			dev_dbg(&dev->pdev->dev, "run next amthif iamthif cb\n");
 			mei_amthif_run_next_cmd(dev);
 		}
 	}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
new file mode 100644
index 0000000..1569afe
--- /dev/null
+++ b/drivers/misc/mei/client.c
@@ -0,0 +1,729 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "client.h"
+
+/**
+ * mei_me_cl_by_uuid - locate index of me client
+ *
+ * @dev: mei device
+ * returns me client index or -ENOENT if not found
+ */
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
+{
+	int i, res = -ENOENT;
+
+	for (i = 0; i < dev->me_clients_num; ++i)
+		if (uuid_le_cmp(*uuid,
+				dev->me_clients[i].props.protocol_name) == 0) {
+			res = i;
+			break;
+		}
+
+	return res;
+}
+
+
+/**
+ * mei_me_cl_by_id - returns index to me_clients for client_id
+ *
+ * @dev: the device structure
+ * @client_id: me client id
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns index on success, -ENOENT on failure.
+ */
+
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
+{
+	int i;
+	for (i = 0; i < dev->me_clients_num; i++)
+		if (dev->me_clients[i].client_id == client_id)
+			break;
+	if (i == dev->me_clients_num)
+		return -ENOENT;
+
+	/* sanity check: the loop above must have stopped on a matching entry */
+	if (WARN_ON(dev->me_clients[i].client_id != client_id))
+		return -ENOENT;
+
+	return i;
+}
+
+
+/**
+ * mei_io_list_flush - removes list entry belonging to cl.
+ *
+ * @list:  An instance of our list structure
+ * @cl: host client
+ */
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+{
+	struct mei_cl_cb *cb;
+	struct mei_cl_cb *next;
+
+	list_for_each_entry_safe(cb, next, &list->list, list) {
+		if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
+			list_del(&cb->list);
+	}
+}
+
+/**
+ * mei_io_cb_free - free mei_cb_private related memory
+ *
+ * @cb: mei callback struct
+ */
+void mei_io_cb_free(struct mei_cl_cb *cb)
+{
+	if (cb == NULL)
+		return;
+
+	kfree(cb->request_buffer.data);
+	kfree(cb->response_buffer.data);
+	kfree(cb);
+}
+
+/**
+ * mei_io_cb_init - allocate and initialize io callback
+ *
+ * @cl - mei client
+ * @file: pointer to file structure
+ *
+ * returns mei_cl_cb pointer or NULL;
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
+{
+	struct mei_cl_cb *cb;
+
+	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+	if (!cb)
+		return NULL;
+
+	mei_io_list_init(cb);
+
+	cb->file_object = fp;
+	cb->cl = cl;
+	cb->buf_idx = 0;
+	return cb;
+}
+
+/**
+ * mei_io_cb_alloc_req_buf - allocate request buffer
+ *
+ * @cb -  io callback structure
+ * @size: size of the buffer
+ *
+ * returns 0 on success
+ *         -EINVAL if cb is NULL
+ *         -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
+{
+	if (!cb)
+		return -EINVAL;
+
+	if (length == 0)
+		return 0;
+
+	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
+	if (!cb->request_buffer.data)
+		return -ENOMEM;
+	cb->request_buffer.size = length;
+	return 0;
+}
+/**
+ * mei_io_cb_alloc_resp_buf - allocate response buffer
+ *
+ * @cb -  io callback structure
+ * @size: size of the buffer
+ *
+ * returns 0 on success
+ *         -EINVAL if cb is NULL
+ *         -ENOMEM if allocation failed
+ */
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
+{
+	if (!cb)
+		return -EINVAL;
+
+	if (length == 0)
+		return 0;
+
+	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
+	if (!cb->response_buffer.data)
+		return -ENOMEM;
+	cb->response_buffer.size = length;
+	return 0;
+}
+
+
+
+/**
+ * mei_cl_flush_queues - flushes queue lists belonging to cl.
+ *
+ * @dev: the device structure
+ * @cl: host client
+ */
+int mei_cl_flush_queues(struct mei_cl *cl)
+{
+	if (WARN_ON(!cl || !cl->dev))
+		return -EINVAL;
+
+	dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
+	mei_io_list_flush(&cl->dev->read_list, cl);
+	mei_io_list_flush(&cl->dev->write_list, cl);
+	mei_io_list_flush(&cl->dev->write_waiting_list, cl);
+	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
+	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
+	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
+	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
+	return 0;
+}
+
+
+/**
+ * mei_cl_init - initializes cl.
+ *
+ * @cl: host client to be initialized
+ * @dev: mei device
+ */
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
+{
+	memset(cl, 0, sizeof(struct mei_cl));
+	init_waitqueue_head(&cl->wait);
+	init_waitqueue_head(&cl->rx_wait);
+	init_waitqueue_head(&cl->tx_wait);
+	INIT_LIST_HEAD(&cl->link);
+	cl->reading_state = MEI_IDLE;
+	cl->writing_state = MEI_IDLE;
+	cl->dev = dev;
+}
+
+/**
+ * mei_cl_allocate - allocates cl structure and sets it up.
+ *
+ * @dev: mei device
+ * returns the allocated host client structure or NULL on failure
+ */
+struct mei_cl *mei_cl_allocate(struct mei_device *dev)
+{
+	struct mei_cl *cl;
+
+	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+	if (!cl)
+		return NULL;
+
+	mei_cl_init(cl, dev);
+
+	return cl;
+}
+
+/**
+ * mei_cl_find_read_cb - find this cl's callback in the read list
+ *
+ * @dev: device structure
+ * returns cb on success, NULL on error
+ */
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+	struct mei_cl_cb *cb = NULL;
+	struct mei_cl_cb *next = NULL;
+
+	list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
+		if (mei_cl_cmp_id(cl, cb->cl))
+			return cb;
+	return NULL;
+}
+
+/** mei_cl_link: allocate host id in the host map
+ *
+ * @cl - host client
+ * @id - fixed host id or MEI_HOST_CLIENT_ID_ANY for generating one
+ * returns 0 on success
+ *	-EINVAL on incorrect values
+ *	-ENOENT if no free client id is available
+ */
+int mei_cl_link(struct mei_cl *cl, int id)
+{
+	struct mei_device *dev;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -EINVAL;
+
+	dev = cl->dev;
+
+	/* If the id is not assigned, get one */
+	if (id == MEI_HOST_CLIENT_ID_ANY)
+		id = find_first_zero_bit(dev->host_clients_map,
+					MEI_CLIENTS_MAX);
+
+	if (id >= MEI_CLIENTS_MAX) {
+		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
+		return -ENOENT;
+	}
+
+	dev->open_handle_count++;
+
+	cl->host_client_id = id;
+	list_add_tail(&cl->link, &dev->file_list);
+
+	set_bit(id, dev->host_clients_map);
+
+	cl->state = MEI_FILE_INITIALIZING;
+
+	dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
+	return 0;
+}
+
+/**
+ * mei_cl_unlink - remove host client from the list
+ *
+ * @cl: host client
+ */
+int mei_cl_unlink(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	struct mei_cl *pos, *next;
+
+	/* don't shout on error exit path */
+	if (!cl)
+		return 0;
+
+	/* wd and amthif might not be initialized */
+	if (!cl->dev)
+		return 0;
+
+	dev = cl->dev;
+
+	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+		if (cl->host_client_id == pos->host_client_id) {
+			dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
+				pos->host_client_id, pos->me_client_id);
+			list_del_init(&pos->link);
+			break;
+		}
+	}
+	return 0;
+}
+
+
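+/**
+ * mei_host_client_init - initialize the reserved host clients
+ *	(watchdog, amthif) once ME client enumeration has completed
+ *
+ * @work: work item embedded in the device structure (init_work)
+ */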
+void mei_host_client_init(struct work_struct *work)
+{
+	struct mei_device *dev = container_of(work,
+					      struct mei_device, init_work);
+	struct mei_client_properties *client_props;
+	int i;
+
+	mutex_lock(&dev->device_lock);
+
+	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
+	dev->open_handle_count = 0;
+
+	/*
+	 * Reserving the first three client IDs
+	 * 0: Reserved for MEI Bus Message communications
+	 * 1: Reserved for Watchdog
+	 * 2: Reserved for AMTHI
+	 */
+	bitmap_set(dev->host_clients_map, 0, 3);
+
+	for (i = 0; i < dev->me_clients_num; i++) {
+		client_props = &dev->me_clients[i].props;
+
+		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
+			mei_amthif_host_init(dev);
+		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
+			mei_wd_host_init(dev);
+	}
+
+	dev->dev_state = MEI_DEV_ENABLED;
+
+	mutex_unlock(&dev->device_lock);
+}
+
+
+/**
+ * mei_cl_disconnect - disconnect host client from the me one
+ *
+ * @cl: host client
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_disconnect(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	struct mei_cl_cb *cb;
+	int rets, err;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (cl->state != MEI_FILE_DISCONNECTING)
+		return 0;
+
+	cb = mei_io_cb_init(cl, NULL);
+	if (!cb)
+		return -ENOMEM;
+
+	cb->fop_type = MEI_FOP_CLOSE;
+	if (dev->hbuf_is_ready) {
+		dev->hbuf_is_ready = false;
+		if (mei_hbm_cl_disconnect_req(dev, cl)) {
+			rets = -ENODEV;
+			dev_err(&dev->pdev->dev, "failed to disconnect.\n");
+			goto free;
+		}
+		mdelay(10); /* Wait for hardware disconnection ready */
+		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+	} else {
+		dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
+		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+
+	}
+	mutex_unlock(&dev->device_lock);
+
+	err = wait_event_timeout(dev->wait_recvd_msg,
+			MEI_FILE_DISCONNECTED == cl->state,
+			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+
+	mutex_lock(&dev->device_lock);
+	if (MEI_FILE_DISCONNECTED == cl->state) {
+		rets = 0;
+		dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
+	} else {
+		rets = -ENODEV;
+		if (MEI_FILE_DISCONNECTED != cl->state)
+			dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
+
+		if (err)
+			dev_dbg(&dev->pdev->dev,
+					"wait failed disconnect err=%08x\n",
+					err);
+
+		dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
+	}
+
+	mei_io_list_flush(&dev->ctrl_rd_list, cl);
+	mei_io_list_flush(&dev->ctrl_wr_list, cl);
+free:
+	mei_io_cb_free(cb);
+	return rets;
+}
+
+
+/**
+ * mei_cl_is_other_connecting - checks if other
+ *    client with the same me client id is connecting
+ *
+ * @cl: private data of the file object
+ *
+ * returns true if another client is connecting, false - otherwise.
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	struct mei_cl *pos;
+	struct mei_cl *next;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return false;
+
+	dev = cl->dev;
+
+	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
+		if ((pos->state == MEI_FILE_CONNECTING) &&
+		    (pos != cl) && cl->me_client_id == pos->me_client_id)
+			return true;
+
+	}
+
+	return false;
+}
+
+/**
+ * mei_cl_connect - connect host client to the me one
+ *
+ * @cl: host client
+ * @file: pointer to file structure
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_connect(struct mei_cl *cl, struct file *file)
+{
+	struct mei_device *dev;
+	struct mei_cl_cb *cb;
+	long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+	int rets;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	cb = mei_io_cb_init(cl, file);
+	if (!cb) {
+		rets = -ENOMEM;
+		goto out;
+	}
+
+	cb->fop_type = MEI_FOP_IOCTL;
+
+	if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
+		dev->hbuf_is_ready = false;
+
+		if (mei_hbm_cl_connect_req(dev, cl)) {
+			rets = -ENODEV;
+			goto out;
+		}
+		cl->timer_count = MEI_CONNECT_TIMEOUT;
+		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
+	} else {
+		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+	}
+
+	mutex_unlock(&dev->device_lock);
+	rets = wait_event_timeout(dev->wait_recvd_msg,
+				 (cl->state == MEI_FILE_CONNECTED ||
+				  cl->state == MEI_FILE_DISCONNECTED),
+				 timeout * HZ);
+	mutex_lock(&dev->device_lock);
+
+	if (cl->state != MEI_FILE_CONNECTED) {
+		rets = -EFAULT;
+
+		mei_io_list_flush(&dev->ctrl_rd_list, cl);
+		mei_io_list_flush(&dev->ctrl_wr_list, cl);
+		goto out;
+	}
+
+	rets = cl->status;
+
+out:
+	mei_io_cb_free(cb);
+	return rets;
+}
+
+/**
+ * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
+ *
+ * @cl: private data of the file object
+ *
+ * returns 1 if mei_flow_ctrl_creds > 0, 0 - otherwise.
+ *	-ENOENT if mei_cl is not present
+ *	-EINVAL if single_recv_buf == 0
+ */
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	int i;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -EINVAL;
+
+	dev = cl->dev;
+
+	if (!dev->me_clients_num)
+		return 0;
+
+	if (cl->mei_flow_ctrl_creds > 0)
+		return 1;
+
+	for (i = 0; i < dev->me_clients_num; i++) {
+		struct mei_me_client  *me_cl = &dev->me_clients[i];
+		if (me_cl->client_id == cl->me_client_id) {
+			if (me_cl->mei_flow_ctrl_creds) {
+				if (WARN_ON(me_cl->props.single_recv_buf == 0))
+					return -EINVAL;
+				return 1;
+			} else {
+				return 0;
+			}
+		}
+	}
+	return -ENOENT;
+}
+
+/**
+ * mei_cl_flow_ctrl_reduce - reduces flow_control.
+ *
+ * @cl: private data of the file object
+ *
+ * returns
+ *	0 on success
+ *	-ENOENT when me client is not found
+ *	-EINVAL when ctrl credits are <= 0
+ */
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	int i;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -EINVAL;
+
+	dev = cl->dev;
+
+	if (!dev->me_clients_num)
+		return -ENOENT;
+
+	for (i = 0; i < dev->me_clients_num; i++) {
+		struct mei_me_client  *me_cl = &dev->me_clients[i];
+		if (me_cl->client_id == cl->me_client_id) {
+			if (me_cl->props.single_recv_buf != 0) {
+				if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
+					return -EINVAL;
+				dev->me_clients[i].mei_flow_ctrl_creds--;
+			} else {
+				if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
+					return -EINVAL;
+				cl->mei_flow_ctrl_creds--;
+			}
+			return 0;
+		}
+	}
+	return -ENOENT;
+}
+
+/**
+ * mei_cl_read_start - the start read client message function.
+ *
+ * @cl: host client
+ *
+ * returns 0 on success, <0 on failure.
+ */
+int mei_cl_read_start(struct mei_cl *cl)
+{
+	struct mei_device *dev;
+	struct mei_cl_cb *cb;
+	int rets;
+	int i;
+
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (cl->state != MEI_FILE_CONNECTED)
+		return -ENODEV;
+
+	if (dev->dev_state != MEI_DEV_ENABLED)
+		return -ENODEV;
+
+	if (cl->read_cb) {
+		dev_dbg(&dev->pdev->dev, "read is pending.\n");
+		return -EBUSY;
+	}
+	i = mei_me_cl_by_id(dev, cl->me_client_id);
+	if (i < 0) {
+		dev_err(&dev->pdev->dev, "no such me client %d\n",
+			cl->me_client_id);
+		return  -ENODEV;
+	}
+
+	cb = mei_io_cb_init(cl, NULL);
+	if (!cb)
+		return -ENOMEM;
+
+	rets = mei_io_cb_alloc_resp_buf(cb,
+			dev->me_clients[i].props.max_msg_length);
+	if (rets)
+		goto err;
+
+	cb->fop_type = MEI_FOP_READ;
+	cl->read_cb = cb;
+	if (dev->hbuf_is_ready) {
+		dev->hbuf_is_ready = false;
+		if (mei_hbm_cl_flow_control_req(dev, cl)) {
+			rets = -ENODEV;
+			goto err;
+		}
+		list_add_tail(&cb->list, &dev->read_list.list);
+	} else {
+		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
+	}
+	return rets;
+err:
+	mei_io_cb_free(cb);
+	return rets;
+}
+
+/**
+ * mei_cl_all_disconnect - disconnect forcefully all connected clients
+ *
+ * @dev: mei device
+ */
+void mei_cl_all_disconnect(struct mei_device *dev)
+{
+	struct mei_cl *cl, *next;
+
+	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+		cl->state = MEI_FILE_DISCONNECTED;
+		cl->mei_flow_ctrl_creds = 0;
+		cl->read_cb = NULL;
+		cl->timer_count = 0;
+	}
+}
+
+
+/**
+ * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
+ *
+ * @dev: mei device
+ */
+void mei_cl_all_read_wakeup(struct mei_device *dev)
+{
+	struct mei_cl *cl, *next;
+	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+		if (waitqueue_active(&cl->rx_wait)) {
+			dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+			wake_up_interruptible(&cl->rx_wait);
+		}
+	}
+}
+
+/**
+ * mei_cl_all_write_clear - clear all pending writes
+ *
+ * @dev: mei device
+ */
+void mei_cl_all_write_clear(struct mei_device *dev)
+{
+	struct mei_cl_cb *cb, *next;
+
+	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+		list_del(&cb->list);
+		mei_io_cb_free(cb);
+	}
+}
+
+
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
new file mode 100644
index 0000000..214b239
--- /dev/null
+++ b/drivers/misc/mei/client.h
@@ -0,0 +1,102 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_CLIENT_H_
+#define _MEI_CLIENT_H_
+
+#include <linux/types.h>
+#include <linux/watchdog.h>
+#include <linux/poll.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+
+int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
+int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
+
+/*
+ * MEI IO Functions
+ */
+struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
+void mei_io_cb_free(struct mei_cl_cb *priv_cb);
+int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
+int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
+
+
+/**
+ * mei_io_list_init - Sets up a queue list.
+ *
+ * @list: An instance of the cl callback structure
+ */
+static inline void mei_io_list_init(struct mei_cl_cb *list)
+{
+	INIT_LIST_HEAD(&list->list);
+}
+void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
+
+/*
+ * MEI Host Client Functions
+ */
+
+struct mei_cl *mei_cl_allocate(struct mei_device *dev);
+void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
+
+
+int mei_cl_link(struct mei_cl *cl, int id);
+int mei_cl_unlink(struct mei_cl *cl);
+
+int mei_cl_flush_queues(struct mei_cl *cl);
+struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl);
+
+/**
+ * mei_cl_cmp_id - tells if the clients have the same id
+ *
+ * @cl1: host client 1
+ * @cl2: host client 2
+ *
+ * returns true  - if ids are the same and not NULL
+ */
+static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
+				const struct mei_cl *cl2)
+{
+	return cl1 && cl2 &&
+		(cl1->host_client_id == cl2->host_client_id) &&
+		(cl1->me_client_id == cl2->me_client_id);
+}
+
+
+int mei_cl_flow_ctrl_creds(struct mei_cl *cl);
+
+int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
+/*
+ *  MEI input output function prototype
+ */
+bool mei_cl_is_other_connecting(struct mei_cl *cl);
+int mei_cl_disconnect(struct mei_cl *cl);
+
+int mei_cl_read_start(struct mei_cl *cl);
+
+int mei_cl_connect(struct mei_cl *cl, struct file *file);
+
+void mei_host_client_init(struct work_struct *work);
+
+
+void mei_cl_all_disconnect(struct mei_device *dev);
+void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_write_clear(struct mei_device *dev);
+
+
+#endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
new file mode 100644
index 0000000..fb9e63b
--- /dev/null
+++ b/drivers/misc/mei/hbm.c
@@ -0,0 +1,669 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+
+/**
+ * mei_hbm_me_cl_allocate - allocates storage for me clients
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_me_cl_allocate(struct mei_device *dev)
+{
+	struct mei_me_client *clients;
+	int b;
+
+	/* count how many ME clients we have */
+	for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
+		dev->me_clients_num++;
+
+	if (dev->me_clients_num <= 0)
+		return;
+
+	kfree(dev->me_clients);
+	dev->me_clients = NULL;
+
+	dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
+		dev->me_clients_num * sizeof(struct mei_me_client));
+	/* allocate storage for ME clients representation */
+	clients = kcalloc(dev->me_clients_num,
+			sizeof(struct mei_me_client), GFP_KERNEL);
+	if (!clients) {
+		dev_err(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
+		dev->dev_state = MEI_DEV_RESETING;
+		mei_reset(dev, 1);
+		return;
+	}
+	dev->me_clients = clients;
+	return;
+}
+
+/**
+ * mei_hbm_cl_hdr - construct client hbm header
+ * @cl: client
+ * @hbm_cmd: host bus message command
+ * @buf: buffer for cl header
+ * @len: buffer length
+ */
+static inline
+void mei_hbm_cl_hdr(struct mei_cl *cl, u8 hbm_cmd, void *buf, size_t len)
+{
+	struct mei_hbm_cl_cmd *cmd = buf;
+
+	memset(cmd, 0, len);
+
+	cmd->hbm_cmd = hbm_cmd;
+	cmd->host_addr = cl->host_client_id;
+	cmd->me_addr = cl->me_client_id;
+}
+
+/**
+ * mei_hbm_cl_addr_equal - tells if the client and the hbm command
+ *	have the same address
+ *
+ * @cl: host client
+ * @buf: hbm command buffer
+ *
+ * returns true if the addresses are the same
+ */
+static inline
+bool mei_hbm_cl_addr_equal(struct mei_cl *cl, void *buf)
+{
+	struct mei_hbm_cl_cmd *cmd = buf;
+	return cl->host_client_id == cmd->host_addr &&
+		cl->me_client_id == cmd->me_addr;
+}
+
+
+/**
+ * is_treat_specially_client - checks if the message belongs
+ * to the file private data.
+ *
+ * @cl: private data of the file object
+ * @rs: connect response bus message
+ *
+ * returns true if the response belongs to the client
+ */
+static bool is_treat_specially_client(struct mei_cl *cl,
+		struct hbm_client_connect_response *rs)
+{
+	if (mei_hbm_cl_addr_equal(cl, rs)) {
+		if (!rs->status) {
+			cl->state = MEI_FILE_CONNECTED;
+			cl->status = 0;
+
+		} else {
+			cl->state = MEI_FILE_DISCONNECTED;
+			cl->status = -ENODEV;
+		}
+		cl->timer_count = 0;
+
+		return true;
+	}
+	return false;
+}
+
+/**
+ * mei_hbm_start_req - sends start request message.
+ *
+ * @dev: the device structure
+ */
+void mei_hbm_start_req(struct mei_device *dev)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	struct hbm_host_version_request *start_req;
+	const size_t len = sizeof(struct hbm_host_version_request);
+
+	mei_hbm_hdr(mei_hdr, len);
+
+	/* host start message */
+	start_req = (struct hbm_host_version_request *)dev->wr_msg.data;
+	memset(start_req, 0, len);
+	start_req->hbm_cmd = HOST_START_REQ_CMD;
+	start_req->host_version.major_version = HBM_MAJOR_VERSION;
+	start_req->host_version.minor_version = HBM_MINOR_VERSION;
+
+	dev->recvd_msg = false;
+	if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+		dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
+		dev->dev_state = MEI_DEV_RESETING;
+		mei_reset(dev, 1);
+	}
+	dev->init_clients_state = MEI_START_MESSAGE;
+	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+	return;
+}
+
+/**
+ * mei_hbm_enum_clients_req - sends enumeration client request message.
+ *
+ * @dev: the device structure
+ *
+ * returns none.
+ */
+static void mei_hbm_enum_clients_req(struct mei_device *dev)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	struct hbm_host_enum_request *enum_req;
+	const size_t len = sizeof(struct hbm_host_enum_request);
+	/* enumerate clients */
+	mei_hbm_hdr(mei_hdr, len);
+
+	enum_req = (struct hbm_host_enum_request *)dev->wr_msg.data;
+	memset(enum_req, 0, len);
+	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
+
+	if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+		dev->dev_state = MEI_DEV_RESETING;
+		dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
+		mei_reset(dev, 1);
+	}
+	dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
+	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+	return;
+}
+
+/**
+ * mei_hbm_prop_req - request property for a single client
+ *
+ * @dev: the device structure
+ *
+ * returns 0 on success, -EIO on write failure
+ */
+
+static int mei_hbm_prop_req(struct mei_device *dev)
+{
+
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	struct hbm_props_request *prop_req;
+	const size_t len = sizeof(struct hbm_props_request);
+	unsigned long next_client_index;
+	u8 client_num;
+
+
+	client_num = dev->me_client_presentation_num;
+
+	next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
+					  dev->me_client_index);
+
+	/* We got all client properties */
+	if (next_client_index == MEI_CLIENTS_MAX) {
+		schedule_work(&dev->init_work);
+
+		return 0;
+	}
+
+	dev->me_clients[client_num].client_id = next_client_index;
+	dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
+
+	mei_hbm_hdr(mei_hdr, len);
+	prop_req = (struct hbm_props_request *)dev->wr_msg.data;
+
+	memset(prop_req, 0, sizeof(struct hbm_props_request));
+
+
+	prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
+	prop_req->address = next_client_index;
+
+	if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
+		dev->dev_state = MEI_DEV_RESETING;
+		dev_err(&dev->pdev->dev, "Properties request command failed\n");
+		mei_reset(dev, 1);
+
+		return -EIO;
+	}
+
+	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+	dev->me_client_index = next_client_index;
+
+	return 0;
+}
+
+/**
+ * mei_hbm_stop_req_prepare - prepare stop request message
+ *
+ * @dev: mei device
+ * @mei_hdr: mei message header
+ * @data: hbm message body buffer
+ */
+static void mei_hbm_stop_req_prepare(struct mei_device *dev,
+		struct mei_msg_hdr *mei_hdr, unsigned char *data)
+{
+	struct hbm_host_stop_request *req =
+			(struct hbm_host_stop_request *)data;
+	const size_t len = sizeof(struct hbm_host_stop_request);
+
+	mei_hbm_hdr(mei_hdr, len);
+
+	memset(req, 0, len);
+	req->hbm_cmd = HOST_STOP_REQ_CMD;
+	req->reason = DRIVER_STOP_REQUEST;
+}
+
+/**
+ * mei_hbm_cl_flow_control_req - sends flow control request.
+ *
+ * @dev: the device structure
+ * @cl: client info
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	const size_t len = sizeof(struct hbm_flow_control);
+
+	mei_hbm_hdr(mei_hdr, len);
+	mei_hbm_cl_hdr(cl, MEI_FLOW_CONTROL_CMD, dev->wr_msg.data, len);
+
+	dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
+		cl->host_client_id, cl->me_client_id);
+
+	return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_add_single_flow_creds - adds single buffer credentials.
+ *
+ * @dev: the device structure
+ * @flow: flow control.
+ */
+static void mei_hbm_add_single_flow_creds(struct mei_device *dev,
+				  struct hbm_flow_control *flow)
+{
+	struct mei_me_client *client;
+	int i;
+
+	for (i = 0; i < dev->me_clients_num; i++) {
+		client = &dev->me_clients[i];
+		if (client && flow->me_addr == client->client_id) {
+			if (client->props.single_recv_buf) {
+				client->mei_flow_ctrl_creds++;
+				dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
+				    flow->me_addr);
+				dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
+				    client->mei_flow_ctrl_creds);
+			} else {
+				BUG();	/* error in flow control */
+			}
+		}
+	}
+}
+
+/**
+ * mei_hbm_cl_flow_control_res - flow control response from me
+ *
+ * @dev: the device structure
+ * @flow_control: flow control response bus message
+ */
+static void mei_hbm_cl_flow_control_res(struct mei_device *dev,
+		struct hbm_flow_control *flow_control)
+{
+	struct mei_cl *cl = NULL;
+	struct mei_cl *next = NULL;
+
+	if (!flow_control->host_addr) {
+		/* single receive buffer */
+		mei_hbm_add_single_flow_creds(dev, flow_control);
+		return;
+	}
+
+	/* normal connection */
+	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+		if (mei_hbm_cl_addr_equal(cl, flow_control)) {
+			cl->mei_flow_ctrl_creds++;
+			dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
+				flow_control->host_addr, flow_control->me_addr);
+			dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
+				    cl->mei_flow_ctrl_creds);
+				break;
+		}
+	}
+}
+
+
+/**
+ * mei_hbm_cl_disconnect_req - sends disconnect message to fw.
+ *
+ * @dev: the device structure
+ * @cl: a client to disconnect from
+ *
+ * This function returns -EIO on write failure
+ */
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	const size_t len = sizeof(struct hbm_client_connect_request);
+
+	mei_hbm_hdr(mei_hdr, len);
+	mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+	return mei_write_message(dev, mei_hdr, dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_disconnect_res - disconnect response from ME
+ *
+ * @dev: the device structure
+ * @rs: disconnect response bus message
+ */
+static void mei_hbm_cl_disconnect_res(struct mei_device *dev,
+		struct hbm_client_connect_response *rs)
+{
+	struct mei_cl *cl;
+	struct mei_cl_cb *pos = NULL, *next = NULL;
+
+	dev_dbg(&dev->pdev->dev,
+			"disconnect_response:\n"
+			"ME Client = %d\n"
+			"Host Client = %d\n"
+			"Status = %d\n",
+			rs->me_addr,
+			rs->host_addr,
+			rs->status);
+
+	list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+		cl = pos->cl;
+
+		if (!cl) {
+			list_del(&pos->list);
+			return;
+		}
+
+		dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+		if (mei_hbm_cl_addr_equal(cl, rs)) {
+			list_del(&pos->list);
+			if (!rs->status)
+				cl->state = MEI_FILE_DISCONNECTED;
+
+			cl->status = 0;
+			cl->timer_count = 0;
+			break;
+		}
+	}
+}
+
+/**
+ * mei_hbm_cl_connect_req - send connection request to specific me client
+ *
+ * @dev: the device structure
+ * @cl: a client to connect to
+ *
+ * returns -EIO on write failure
+ */
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
+{
+	struct mei_msg_hdr *mei_hdr = &dev->wr_msg.hdr;
+	const size_t len = sizeof(struct hbm_client_connect_request);
+
+	mei_hbm_hdr(mei_hdr, len);
+	mei_hbm_cl_hdr(cl, CLIENT_CONNECT_REQ_CMD, dev->wr_msg.data, len);
+
+	return mei_write_message(dev, mei_hdr,  dev->wr_msg.data);
+}
+
+/**
+ * mei_hbm_cl_connect_res - connect response from the ME
+ *
+ * @dev: the device structure
+ * @rs: connect response bus message
+ */
+static void mei_hbm_cl_connect_res(struct mei_device *dev,
+		struct hbm_client_connect_response *rs)
+{
+
+	struct mei_cl *cl;
+	struct mei_cl_cb *pos = NULL, *next = NULL;
+
+	dev_dbg(&dev->pdev->dev,
+			"connect_response:\n"
+			"ME Client = %d\n"
+			"Host Client = %d\n"
+			"Status = %d\n",
+			rs->me_addr,
+			rs->host_addr,
+			rs->status);
+
+	/* if WD or iamthif client treat specially */
+
+	if (is_treat_specially_client(&dev->wd_cl, rs)) {
+		dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+		mei_watchdog_register(dev);
+
+		return;
+	}
+
+	if (is_treat_specially_client(&dev->iamthif_cl, rs)) {
+		dev->iamthif_state = MEI_IAMTHIF_IDLE;
+		return;
+	}
+	list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
+
+		cl = pos->cl;
+		if (!cl) {
+			list_del(&pos->list);
+			return;
+		}
+		if (pos->fop_type == MEI_FOP_IOCTL) {
+			if (is_treat_specially_client(cl, rs)) {
+				list_del(&pos->list);
+				cl->status = 0;
+				cl->timer_count = 0;
+				break;
+			}
+		}
+	}
+}
+
+
+/**
+ * mei_hbm_fw_disconnect_req - disconnect request initiated by ME firmware
+ *  host sends disconnect response
+ *
+ * @dev: the device structure.
+ * @disconnect_req: disconnect request bus message from the me
+ */
+static void mei_hbm_fw_disconnect_req(struct mei_device *dev,
+		struct hbm_client_connect_request *disconnect_req)
+{
+	struct mei_cl *cl, *next;
+	const size_t len = sizeof(struct hbm_client_connect_response);
+
+	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
+		if (mei_hbm_cl_addr_equal(cl, disconnect_req)) {
+			dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
+					disconnect_req->host_addr,
+					disconnect_req->me_addr);
+			cl->state = MEI_FILE_DISCONNECTED;
+			cl->timer_count = 0;
+			if (cl == &dev->wd_cl)
+				dev->wd_pending = false;
+			else if (cl == &dev->iamthif_cl)
+				dev->iamthif_timer = 0;
+
+			/* prepare disconnect response */
+			mei_hbm_hdr(&dev->wr_ext_msg.hdr, len);
+			mei_hbm_cl_hdr(cl, CLIENT_DISCONNECT_RES_CMD,
+					 dev->wr_ext_msg.data, len);
+			break;
+		}
+	}
+}
+
+
+/**
+ * mei_hbm_dispatch - bottom half read routine after ISR to
+ * handle the read bus message cmd processing.
+ *
+ * @dev: the device structure
+ * @hdr: header of bus message
+ */
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+{
+	struct mei_bus_message *mei_msg;
+	struct mei_me_client *me_client;
+	struct hbm_host_version_response *version_res;
+	struct hbm_client_connect_response *connect_res;
+	struct hbm_client_connect_response *disconnect_res;
+	struct hbm_client_connect_request *disconnect_req;
+	struct hbm_flow_control *flow_control;
+	struct hbm_props_response *props_res;
+	struct hbm_host_enum_response *enum_res;
+
+	/* read the message to our buffer */
+	BUG_ON(hdr->length >= sizeof(dev->rd_msg_buf));
+	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
+	mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
+
+	switch (mei_msg->hbm_cmd) {
+	case HOST_START_RES_CMD:
+		version_res = (struct hbm_host_version_response *)mei_msg;
+		if (!version_res->host_version_supported) {
+			dev->version = version_res->me_max_version;
+			dev_dbg(&dev->pdev->dev, "version mismatch.\n");
+
+			mei_hbm_stop_req_prepare(dev, &dev->wr_msg.hdr,
+						dev->wr_msg.data);
+			mei_write_message(dev, &dev->wr_msg.hdr,
+					dev->wr_msg.data);
+			return;
+		}
+
+		dev->version.major_version = HBM_MAJOR_VERSION;
+		dev->version.minor_version = HBM_MINOR_VERSION;
+		if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+		    dev->init_clients_state == MEI_START_MESSAGE) {
+			dev->init_clients_timer = 0;
+			mei_hbm_enum_clients_req(dev);
+		} else {
+			dev->recvd_msg = false;
+			dev_dbg(&dev->pdev->dev, "reset due to received hbm: host start\n");
+			mei_reset(dev, 1);
+			return;
+		}
+
+		dev->recvd_msg = true;
+		dev_dbg(&dev->pdev->dev, "host start response message received.\n");
+		break;
+
+	case CLIENT_CONNECT_RES_CMD:
+		connect_res = (struct hbm_client_connect_response *) mei_msg;
+		mei_hbm_cl_connect_res(dev, connect_res);
+		dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
+		wake_up(&dev->wait_recvd_msg);
+		break;
+
+	case CLIENT_DISCONNECT_RES_CMD:
+		disconnect_res = (struct hbm_client_connect_response *) mei_msg;
+		mei_hbm_cl_disconnect_res(dev, disconnect_res);
+		dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
+		wake_up(&dev->wait_recvd_msg);
+		break;
+
+	case MEI_FLOW_CONTROL_CMD:
+		flow_control = (struct hbm_flow_control *) mei_msg;
+		mei_hbm_cl_flow_control_res(dev, flow_control);
+		dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
+		break;
+
+	case HOST_CLIENT_PROPERTIES_RES_CMD:
+		props_res = (struct hbm_props_response *)mei_msg;
+		me_client = &dev->me_clients[dev->me_client_presentation_num];
+
+		if (props_res->status || !dev->me_clients) {
+			dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
+			mei_reset(dev, 1);
+			return;
+		}
+
+		if (me_client->client_id != props_res->address) {
+			dev_err(&dev->pdev->dev,
+				"Host client properties reply mismatch\n");
+			mei_reset(dev, 1);
+
+			return;
+		}
+
+		if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
+		    dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
+			dev_err(&dev->pdev->dev,
+				"Unexpected client properties reply\n");
+			mei_reset(dev, 1);
+
+			return;
+		}
+
+		me_client->props = props_res->client_properties;
+		dev->me_client_index++;
+		dev->me_client_presentation_num++;
+
+		/* request property for the next client */
+		mei_hbm_prop_req(dev);
+
+		break;
+
+	case HOST_ENUM_RES_CMD:
+		enum_res = (struct hbm_host_enum_response *) mei_msg;
+		memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
+		if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
+		    dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
+				dev->init_clients_timer = 0;
+				dev->me_client_presentation_num = 0;
+				dev->me_client_index = 0;
+				mei_hbm_me_cl_allocate(dev);
+				dev->init_clients_state =
+					MEI_CLIENT_PROPERTIES_MESSAGE;
+
+				/* first property request */
+				mei_hbm_prop_req(dev);
+		} else {
+			dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
+			mei_reset(dev, 1);
+			return;
+		}
+		break;
+
+	case HOST_STOP_RES_CMD:
+		dev->dev_state = MEI_DEV_DISABLED;
+		dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
+		mei_reset(dev, 1);
+		break;
+
+	case CLIENT_DISCONNECT_REQ_CMD:
+		/* search for client */
+		disconnect_req = (struct hbm_client_connect_request *)mei_msg;
+		mei_hbm_fw_disconnect_req(dev, disconnect_req);
+		break;
+
+	case ME_STOP_REQ_CMD:
+
+		mei_hbm_stop_req_prepare(dev, &dev->wr_ext_msg.hdr,
+					dev->wr_ext_msg.data);
+		break;
+	default:
+		BUG();
+		break;
+
+	}
+}
+
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
new file mode 100644
index 0000000..b552afb
--- /dev/null
+++ b/drivers/misc/mei/hbm.h
@@ -0,0 +1,39 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _MEI_HBM_H_
+#define _MEI_HBM_H_
+
+void mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr);
+
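+/*
+ * mei_hbm_hdr - fill a mei message header for a host bus message
+ * (host and me addresses are 0, message is marked complete)
+ */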
+static inline void mei_hbm_hdr(struct mei_msg_hdr *hdr, size_t length)
+{
+	hdr->host_addr = 0;
+	hdr->me_addr = 0;
+	hdr->length = length;
+	hdr->msg_complete = 1;
+	hdr->reserved = 0;
+}
+
+void mei_hbm_start_req(struct mei_device *dev);
+
+int mei_hbm_cl_flow_control_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_disconnect_req(struct mei_device *dev, struct mei_cl *cl);
+int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl);
+
+
+#endif /* _MEI_HBM_H_ */
+
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
new file mode 100644
index 0000000..6a203b6
--- /dev/null
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Intel MEI Interface Header
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *	Intel Corporation.
+ *	linux-mei@linux.intel.com
+ *	http://www.intel.com
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2003 - 2012 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef _MEI_HW_MEI_REGS_H_
+#define _MEI_HW_MEI_REGS_H_
+
+/*
+ * MEI device IDs
+ */
+#define MEI_DEV_ID_82946GZ    0x2974  /* 82946GZ/GL */
+#define MEI_DEV_ID_82G35      0x2984  /* 82G35 Express */
+#define MEI_DEV_ID_82Q965     0x2994  /* 82Q963/Q965 */
+#define MEI_DEV_ID_82G965     0x29A4  /* 82P965/G965 */
+
+#define MEI_DEV_ID_82GM965    0x2A04  /* Mobile PM965/GM965 */
+#define MEI_DEV_ID_82GME965   0x2A14  /* Mobile GME965/GLE960 */
+
+#define MEI_DEV_ID_ICH9_82Q35 0x29B4  /* 82Q35 Express */
+#define MEI_DEV_ID_ICH9_82G33 0x29C4  /* 82G33/G31/P35/P31 Express */
+#define MEI_DEV_ID_ICH9_82Q33 0x29D4  /* 82Q33 Express */
+#define MEI_DEV_ID_ICH9_82X38 0x29E4  /* 82X38/X48 Express */
+#define MEI_DEV_ID_ICH9_3200  0x29F4  /* 3200/3210 Server */
+
+#define MEI_DEV_ID_ICH9_6     0x28B4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_7     0x28C4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_8     0x28D4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_9     0x28E4  /* Bearlake */
+#define MEI_DEV_ID_ICH9_10    0x28F4  /* Bearlake */
+
+#define MEI_DEV_ID_ICH9M_1    0x2A44  /* Cantiga */
+#define MEI_DEV_ID_ICH9M_2    0x2A54  /* Cantiga */
+#define MEI_DEV_ID_ICH9M_3    0x2A64  /* Cantiga */
+#define MEI_DEV_ID_ICH9M_4    0x2A74  /* Cantiga */
+
+#define MEI_DEV_ID_ICH10_1    0x2E04  /* Eaglelake */
+#define MEI_DEV_ID_ICH10_2    0x2E14  /* Eaglelake */
+#define MEI_DEV_ID_ICH10_3    0x2E24  /* Eaglelake */
+#define MEI_DEV_ID_ICH10_4    0x2E34  /* Eaglelake */
+
+#define MEI_DEV_ID_IBXPK_1    0x3B64  /* Calpella */
+#define MEI_DEV_ID_IBXPK_2    0x3B65  /* Calpella */
+
+#define MEI_DEV_ID_CPT_1      0x1C3A  /* Couger Point */
+#define MEI_DEV_ID_PBG_1      0x1D3A  /* C600/X79 Patsburg */
+
+#define MEI_DEV_ID_PPT_1      0x1E3A  /* Panther Point */
+#define MEI_DEV_ID_PPT_2      0x1CBA  /* Panther Point */
+#define MEI_DEV_ID_PPT_3      0x1DBA  /* Panther Point */
+
+#define MEI_DEV_ID_LPT        0x8C3A  /* Lynx Point */
+#define MEI_DEV_ID_LPT_LP     0x9C3A  /* Lynx Point LP */
+/*
+ * MEI HW Section
+ */
+
+/* MEI registers */
+/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
+#define H_CB_WW    0
+/* H_CSR - Host Control Status register */
+#define H_CSR      4
+/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
+#define ME_CB_RW   8
+/* ME_CSR_HA - ME Control Status Host Access register (read only) */
+#define ME_CSR_HA  0xC
+
+
+/* register bits of H_CSR (Host Control Status register) */
+/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
+#define H_CBD             0xFF000000
+/* Host Circular Buffer Write Pointer */
+#define H_CBWP            0x00FF0000
+/* Host Circular Buffer Read Pointer */
+#define H_CBRP            0x0000FF00
+/* Host Reset */
+#define H_RST             0x00000010
+/* Host Ready */
+#define H_RDY             0x00000008
+/* Host Interrupt Generate */
+#define H_IG              0x00000004
+/* Host Interrupt Status */
+#define H_IS              0x00000002
+/* Host Interrupt Enable */
+#define H_IE              0x00000001
+
+
+/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
+/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
+access to ME_CBD */
+#define ME_CBD_HRA        0xFF000000
+/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
+#define ME_CBWP_HRA       0x00FF0000
+/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
+#define ME_CBRP_HRA       0x0000FF00
+/* ME Reset HRA - host read only access to ME_RST */
+#define ME_RST_HRA        0x00000010
+/* ME Ready HRA - host read only access to ME_RDY */
+#define ME_RDY_HRA        0x00000008
+/* ME Interrupt Generate HRA - host read only access to ME_IG */
+#define ME_IG_HRA         0x00000004
+/* ME Interrupt Status HRA - host read only access to ME_IS */
+#define ME_IS_HRA         0x00000002
+/* ME Interrupt Enable HRA - host read only access to ME_IE */
+#define ME_IE_HRA         0x00000001
+
+#endif /* _MEI_HW_MEI_REGS_H_ */
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
new file mode 100644
index 0000000..45ea718
--- /dev/null
+++ b/drivers/misc/mei/hw-me.c
@@ -0,0 +1,576 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/pci.h>
+
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+
+#include "hbm.h"
+
+
+/**
+ * mei_reg_read - Reads 32bit data from the mei device
+ *
+ * @hw: the me hardware structure
+ * @offset: offset from which to read the data
+ *
+ * returns register value (u32)
+ */
+static inline u32 mei_reg_read(const struct mei_me_hw *hw,
+			       unsigned long offset)
+{
+	return ioread32(hw->mem_addr + offset);
+}
+
+
+/**
+ * mei_reg_write - Writes 32bit data to the mei device
+ *
+ * @hw: the me hardware structure
+ * @offset: offset at which to write the data
+ * @value: register value to write (u32)
+ */
+static inline void mei_reg_write(const struct mei_me_hw *hw,
+				 unsigned long offset, u32 value)
+{
+	iowrite32(value, hw->mem_addr + offset);
+}
+
+/**
+ * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
+ *  read window register
+ *
+ * @dev: the device structure
+ *
+ * returns ME_CB_RW register value (u32)
+ */
+static u32 mei_me_mecbrw_read(const struct mei_device *dev)
+{
+	return mei_reg_read(to_me_hw(dev), ME_CB_RW);
+}
+/**
+ * mei_mecsr_read - Reads 32bit data from the ME CSR
+ *
+ * @hw: the me hardware structure
+ *
+ * returns ME_CSR_HA register value (u32)
+ */
+static inline u32 mei_mecsr_read(const struct mei_me_hw *hw)
+{
+	return mei_reg_read(hw, ME_CSR_HA);
+}
+
+/**
+ * mei_hcsr_read - Reads 32bit data from the host CSR
+ *
+ * @hw: the me hardware structure
+ *
+ * returns H_CSR register value (u32)
+ */
+static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
+{
+	return mei_reg_read(hw, H_CSR);
+}
+
+/**
+ * mei_hcsr_set - writes H_CSR register to the mei device,
+ * and ignores the H_IS bit for it is write-one-to-zero.
+ *
+ * @hw: the me hardware structure
+ * @hcsr: new register value
+ */
+static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
+{
+	hcsr &= ~H_IS;
+	mei_reg_write(hw, H_CSR, hcsr);
+}
+
+
+/**
+ * mei_me_hw_config - configure hw dependent settings
+ *
+ * @dev: mei device
+ */
+static void mei_me_hw_config(struct mei_device *dev)
+{
+	u32 hcsr = mei_hcsr_read(to_me_hw(dev));
+	/* Doesn't change in runtime */
+	dev->hbuf_depth = (hcsr & H_CBD) >> 24;
+}
+/**
+ * mei_me_intr_clear - clear and stop interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_clear(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 hcsr = mei_hcsr_read(hw);
+	if ((hcsr & H_IS) == H_IS)
+		mei_reg_write(hw, H_CSR, hcsr);
+}
+/**
+ * mei_me_intr_enable - enables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_enable(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 hcsr = mei_hcsr_read(hw);
+	hcsr |= H_IE;
+	mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_intr_disable - disables mei device interrupts
+ *
+ * @dev: the device structure
+ */
+static void mei_me_intr_disable(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 hcsr = mei_hcsr_read(hw);
+	hcsr  &= ~H_IE;
+	mei_hcsr_set(hw, hcsr);
+}
+
+/**
+ * mei_me_hw_reset - resets fw via mei csr register.
+ *
+ * @dev: the device structure
+ * @intr_enable: if interrupts should be enabled after reset.
+ */
+static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 hcsr = mei_hcsr_read(hw);
+
+	dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
+
+	hcsr |= (H_RST | H_IG);
+
+	if (intr_enable)
+		hcsr |= H_IE;
+	else
+		hcsr &= ~H_IE;
+
+	mei_hcsr_set(hw, hcsr);
+
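+	/* release the reset: clear H_RST and signal the change with H_IG */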
+	hcsr = mei_hcsr_read(hw) | H_IG;
+	hcsr &= ~H_RST;
+
+	mei_hcsr_set(hw, hcsr);
+
+	hcsr = mei_hcsr_read(hw);
+
+	dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
+}
+
+/**
+ * mei_me_host_set_ready - enable device
+ *
+ * @dev: mei device
+ */
+
+static void mei_me_host_set_ready(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	hw->host_hw_state |= H_IE | H_IG | H_RDY;
+	mei_hcsr_set(hw, hw->host_hw_state);
+}
+/**
+ * mei_me_host_is_ready - check whether the host has turned ready
+ *
+ * @dev: mei device
+ * returns bool
+ */
+static bool mei_me_host_is_ready(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	hw->host_hw_state = mei_hcsr_read(hw);
+	return (hw->host_hw_state & H_RDY) == H_RDY;
+}
+
+/**
+ * mei_me_hw_is_ready - check whether the me(hw) has turned ready
+ *
+ * @dev: mei device
+ * returns bool
+ */
+static bool mei_me_hw_is_ready(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	hw->me_hw_state = mei_mecsr_read(hw);
+	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
+}
+
+/**
+ * mei_hbuf_filled_slots - gets number of device filled buffer slots
+ *
+ * @dev: the device structure
+ *
+ * returns number of filled slots
+ */
+static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	char read_ptr, write_ptr;
+
+	hw->host_hw_state = mei_hcsr_read(hw);
+
+	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
+	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
+
+	return (unsigned char) (write_ptr - read_ptr);
+}
+
+/**
+ * mei_me_hbuf_is_empty - checks if host buffer is empty.
+ *
+ * @dev: the device structure
+ *
+ * returns true if empty, false - otherwise.
+ */
+static bool mei_me_hbuf_is_empty(struct mei_device *dev)
+{
+	return mei_hbuf_filled_slots(dev) == 0;
+}
+
+/**
+ * mei_me_hbuf_empty_slots - counts write empty slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
+ */
+static int mei_me_hbuf_empty_slots(struct mei_device *dev)
+{
+	unsigned char filled_slots, empty_slots;
+
+	filled_slots = mei_hbuf_filled_slots(dev);
+	empty_slots = dev->hbuf_depth - filled_slots;
+
+	/* check for overflow */
+	if (filled_slots > dev->hbuf_depth)
+		return -EOVERFLOW;
+
+	return empty_slots;
+}
+
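+/* mei_me_hbuf_max_len - maximum message payload (bytes) that fits in the host buffer */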
+static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
+{
+	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
+}
+
+
+/**
+ * mei_me_write_message - writes a message to mei device.
+ *
+ * @dev: the device structure
+ * @header: mei HECI header of message
+ * @buf: message payload to be written
+ *
+ * This function returns -EIO if write has failed
+ */
+static int mei_me_write_message(struct mei_device *dev,
+			struct mei_msg_hdr *header,
+			unsigned char *buf)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	unsigned long rem, dw_cnt;
+	unsigned long length = header->length;
+	u32 *reg_buf = (u32 *)buf;
+	u32 hcsr;
+	int i;
+	int empty_slots;
+
+	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
+
+	empty_slots = mei_hbuf_empty_slots(dev);
+	dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
+
+	dw_cnt = mei_data2slots(length);
+	if (empty_slots < 0 || dw_cnt > empty_slots)
+		return -EIO;
+
+	mei_reg_write(hw, H_CB_WW, *((u32 *) header));
+
+	for (i = 0; i < length / 4; i++)
+		mei_reg_write(hw, H_CB_WW, reg_buf[i]);
+
+	rem = length & 0x3;
+	if (rem > 0) {
+		u32 reg = 0;
+		memcpy(&reg, &buf[length - rem], rem);
+		mei_reg_write(hw, H_CB_WW, reg);
+	}
+
+	hcsr = mei_hcsr_read(hw) | H_IG;
+	mei_hcsr_set(hw, hcsr);
+	if (!mei_me_hw_is_ready(dev))
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * mei_me_count_full_read_slots - counts read full slots.
+ *
+ * @dev: the device structure
+ *
+ * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
+ */
+static int mei_me_count_full_read_slots(struct mei_device *dev)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	char read_ptr, write_ptr;
+	unsigned char buffer_depth, filled_slots;
+
+	hw->me_hw_state = mei_mecsr_read(hw);
+	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
+	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
+	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
+	filled_slots = (unsigned char) (write_ptr - read_ptr);
+
+	/* check for overflow */
+	if (filled_slots > buffer_depth)
+		return -EOVERFLOW;
+
+	dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
+	return (int)filled_slots;
+}
+
+/**
+ * mei_me_read_slots - reads a message from mei device.
+ *
+ * @dev: the device structure
+ * @buffer: message buffer to be filled
+ * @buffer_length: length of the message to read
+ */
+static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
+		    unsigned long buffer_length)
+{
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 *reg_buf = (u32 *)buffer;
+	u32 hcsr;
+
+	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
+		*reg_buf++ = mei_me_mecbrw_read(dev);
+
+	if (buffer_length > 0) {
+		u32 reg = mei_me_mecbrw_read(dev);
+		memcpy(reg_buf, &reg, buffer_length);
+	}
+
+	hcsr = mei_hcsr_read(hw) | H_IG;
+	mei_hcsr_set(hw, hcsr);
+	return 0;
+}
+
+/**
+ * mei_me_irq_quick_handler - The ISR of the MEI device
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ */
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
+{
+	struct mei_device *dev = (struct mei_device *) dev_id;
+	struct mei_me_hw *hw = to_me_hw(dev);
+	u32 csr_reg = mei_hcsr_read(hw);
+
+	if ((csr_reg & H_IS) != H_IS)
+		return IRQ_NONE;
+
+	/* clear H_IS bit in H_CSR */
+	mei_reg_write(hw, H_CSR, csr_reg);
+
+	return IRQ_WAKE_THREAD;
+}
+
+/**
+ * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
+ * processing.
+ *
+ * @irq: The irq number
+ * @dev_id: pointer to the device structure
+ *
+ * returns irqreturn_t
+ *
+ */
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
+{
+	struct mei_device *dev = (struct mei_device *) dev_id;
+	struct mei_cl_cb complete_list;
+	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+	struct mei_cl *cl;
+	s32 slots;
+	int rets;
+	bool  bus_message_received;
+
+
+	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
+	/* initialize our complete list */
+	mutex_lock(&dev->device_lock);
+	mei_io_list_init(&complete_list);
+
+	/* Ack the interrupt here
+	 * In case of MSI we don't go through the quick handler */
+	if (pci_dev_msi_enabled(dev->pdev))
+		mei_clear_interrupts(dev);
+
+	/* check if ME wants a reset */
+	if (!mei_hw_is_ready(dev) &&
+	    dev->dev_state != MEI_DEV_RESETING &&
+	    dev->dev_state != MEI_DEV_INITIALIZING) {
+		dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+		mei_reset(dev, 1);
+		mutex_unlock(&dev->device_lock);
+		return IRQ_HANDLED;
+	}
+
+	/*  check if we need to start the dev */
+	if (!mei_host_is_ready(dev)) {
+		if (mei_hw_is_ready(dev)) {
+			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
+
+			mei_host_set_ready(dev);
+
+			dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
+			/* link is established, start sending messages */
+
+			dev->dev_state = MEI_DEV_INIT_CLIENTS;
+
+			mei_hbm_start_req(dev);
+			mutex_unlock(&dev->device_lock);
+			return IRQ_HANDLED;
+		} else {
+			dev_dbg(&dev->pdev->dev, "FW not ready.\n");
+			mutex_unlock(&dev->device_lock);
+			return IRQ_HANDLED;
+		}
+	}
+	/* check slots available for reading */
+	slots = mei_count_full_read_slots(dev);
+	while (slots > 0) {
+		/* we have urgent data to send so break the read */
+		if (dev->wr_ext_msg.hdr.length)
+			break;
+		dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
+		dev_dbg(&dev->pdev->dev, "call mei_irq_read_handler.\n");
+		rets = mei_irq_read_handler(dev, &complete_list, &slots);
+		if (rets)
+			goto end;
+	}
+	rets = mei_irq_write_handler(dev, &complete_list);
+end:
+	dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
+	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
+
+	bus_message_received = false;
+	if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
+		dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
+		bus_message_received = true;
+	}
+	mutex_unlock(&dev->device_lock);
+	if (bus_message_received) {
+		dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
+		wake_up_interruptible(&dev->wait_recvd_msg);
+		bus_message_received = false;
+	}
+	if (list_empty(&complete_list.list))
+		return IRQ_HANDLED;
+
+
+	list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
+		cl = cb_pos->cl;
+		list_del(&cb_pos->list);
+		if (cl) {
+			if (cl != &dev->iamthif_cl) {
+				dev_dbg(&dev->pdev->dev, "completing call back.\n");
+				mei_irq_complete_handler(cl, cb_pos);
+				cb_pos = NULL;
+			} else if (cl == &dev->iamthif_cl) {
+				mei_amthif_complete(dev, cb_pos);
+			}
+		}
+	}
+	return IRQ_HANDLED;
+}
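+
+/* ME hardware operations, plugged into the generic driver via dev->ops */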
+static const struct mei_hw_ops mei_me_hw_ops = {
+
+	.host_set_ready = mei_me_host_set_ready,
+	.host_is_ready = mei_me_host_is_ready,
+
+	.hw_is_ready = mei_me_hw_is_ready,
+	.hw_reset = mei_me_hw_reset,
+	.hw_config  = mei_me_hw_config,
+
+	.intr_clear = mei_me_intr_clear,
+	.intr_enable = mei_me_intr_enable,
+	.intr_disable = mei_me_intr_disable,
+
+	.hbuf_free_slots = mei_me_hbuf_empty_slots,
+	.hbuf_is_ready = mei_me_hbuf_is_empty,
+	.hbuf_max_len = mei_me_hbuf_max_len,
+
+	.write = mei_me_write_message,
+
+	.rdbuf_full_slots = mei_me_count_full_read_slots,
+	.read_hdr = mei_me_mecbrw_read,
+	.read = mei_me_read_slots
+};
+
+/**
+ * mei_me_dev_init - allocates and initializes the mei device structure
+ *
+ * @pdev: The pci device structure
+ *
+ * returns The mei_device pointer on success, NULL on failure.
+ */
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev)
+{
+	struct mei_device *dev;
+
+	dev = kzalloc(sizeof(struct mei_device) +
+			 sizeof(struct mei_me_hw), GFP_KERNEL);
+	if (!dev)
+		return NULL;
+
+	mei_device_init(dev);
+
+	INIT_LIST_HEAD(&dev->wd_cl.link);
+	INIT_LIST_HEAD(&dev->iamthif_cl.link);
+	mei_io_list_init(&dev->amthif_cmd_list);
+	mei_io_list_init(&dev->amthif_rd_complete_list);
+
+	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+	INIT_WORK(&dev->init_work, mei_host_client_init);
+
+	dev->ops = &mei_me_hw_ops;
+
+	dev->pdev = pdev;
+	return dev;
+}
+
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
new file mode 100644
index 0000000..8518d3e
--- /dev/null
+++ b/drivers/misc/mei/hw-me.h
@@ -0,0 +1,48 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+
+
+#ifndef _MEI_INTERFACE_H_
+#define _MEI_INTERFACE_H_
+
+#include <linux/mei.h>
+#include "mei_dev.h"
+#include "client.h"
+
+struct mei_me_hw {
+	void __iomem *mem_addr;
+	/*
+	 * hw states of host and fw(ME)
+	 */
+	u32 host_hw_state;
+	u32 me_hw_state;
+};
+
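+/* get the ME hw section out of the generic mei device structure */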
+#define to_me_hw(dev) (struct mei_me_hw *)((dev)->hw)
+
+struct mei_device *mei_me_dev_init(struct pci_dev *pdev);
+
+/* get slots (dwords) from a message length + header (bytes) */
+static inline unsigned char mei_data2slots(size_t length)
+{
+	return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
+}
+
+irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id);
+irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id);
+
+#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index be8ca6b..cb2f556 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -31,109 +31,6 @@
 #define MEI_IAMTHIF_STALL_TIMER    12  /* HPS */
 #define MEI_IAMTHIF_READ_TIMER     10  /* HPS */
 
-/*
- * Internal Clients Number
- */
-#define MEI_WD_HOST_CLIENT_ID          1
-#define MEI_IAMTHIF_HOST_CLIENT_ID     2
-
-/*
- * MEI device IDs
- */
-#define MEI_DEV_ID_82946GZ    0x2974  /* 82946GZ/GL */
-#define MEI_DEV_ID_82G35      0x2984  /* 82G35 Express */
-#define MEI_DEV_ID_82Q965     0x2994  /* 82Q963/Q965 */
-#define MEI_DEV_ID_82G965     0x29A4  /* 82P965/G965 */
-
-#define MEI_DEV_ID_82GM965    0x2A04  /* Mobile PM965/GM965 */
-#define MEI_DEV_ID_82GME965   0x2A14  /* Mobile GME965/GLE960 */
-
-#define MEI_DEV_ID_ICH9_82Q35 0x29B4  /* 82Q35 Express */
-#define MEI_DEV_ID_ICH9_82G33 0x29C4  /* 82G33/G31/P35/P31 Express */
-#define MEI_DEV_ID_ICH9_82Q33 0x29D4  /* 82Q33 Express */
-#define MEI_DEV_ID_ICH9_82X38 0x29E4  /* 82X38/X48 Express */
-#define MEI_DEV_ID_ICH9_3200  0x29F4  /* 3200/3210 Server */
-
-#define MEI_DEV_ID_ICH9_6     0x28B4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_7     0x28C4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_8     0x28D4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_9     0x28E4  /* Bearlake */
-#define MEI_DEV_ID_ICH9_10    0x28F4  /* Bearlake */
-
-#define MEI_DEV_ID_ICH9M_1    0x2A44  /* Cantiga */
-#define MEI_DEV_ID_ICH9M_2    0x2A54  /* Cantiga */
-#define MEI_DEV_ID_ICH9M_3    0x2A64  /* Cantiga */
-#define MEI_DEV_ID_ICH9M_4    0x2A74  /* Cantiga */
-
-#define MEI_DEV_ID_ICH10_1    0x2E04  /* Eaglelake */
-#define MEI_DEV_ID_ICH10_2    0x2E14  /* Eaglelake */
-#define MEI_DEV_ID_ICH10_3    0x2E24  /* Eaglelake */
-#define MEI_DEV_ID_ICH10_4    0x2E34  /* Eaglelake */
-
-#define MEI_DEV_ID_IBXPK_1    0x3B64  /* Calpella */
-#define MEI_DEV_ID_IBXPK_2    0x3B65  /* Calpella */
-
-#define MEI_DEV_ID_CPT_1      0x1C3A  /* Couger Point */
-#define MEI_DEV_ID_PBG_1      0x1D3A  /* C600/X79 Patsburg */
-
-#define MEI_DEV_ID_PPT_1      0x1E3A  /* Panther Point */
-#define MEI_DEV_ID_PPT_2      0x1CBA  /* Panther Point */
-#define MEI_DEV_ID_PPT_3      0x1DBA  /* Panther Point */
-
-#define MEI_DEV_ID_LPT        0x8C3A  /* Lynx Point */
-#define MEI_DEV_ID_LPT_LP     0x9C3A  /* Lynx Point LP */
-/*
- * MEI HW Section
- */
-
-/* MEI registers */
-/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
-#define H_CB_WW    0
-/* H_CSR - Host Control Status register */
-#define H_CSR      4
-/* ME_CB_RW - ME Circular Buffer Read Window register (read only) */
-#define ME_CB_RW   8
-/* ME_CSR_HA - ME Control Status Host Access register (read only) */
-#define ME_CSR_HA  0xC
-
-
-/* register bits of H_CSR (Host Control Status register) */
-/* Host Circular Buffer Depth - maximum number of 32-bit entries in CB */
-#define H_CBD             0xFF000000
-/* Host Circular Buffer Write Pointer */
-#define H_CBWP            0x00FF0000
-/* Host Circular Buffer Read Pointer */
-#define H_CBRP            0x0000FF00
-/* Host Reset */
-#define H_RST             0x00000010
-/* Host Ready */
-#define H_RDY             0x00000008
-/* Host Interrupt Generate */
-#define H_IG              0x00000004
-/* Host Interrupt Status */
-#define H_IS              0x00000002
-/* Host Interrupt Enable */
-#define H_IE              0x00000001
-
-
-/* register bits of ME_CSR_HA (ME Control Status Host Access register) */
-/* ME CB (Circular Buffer) Depth HRA (Host Read Access) - host read only
-access to ME_CBD */
-#define ME_CBD_HRA        0xFF000000
-/* ME CB Write Pointer HRA - host read only access to ME_CBWP */
-#define ME_CBWP_HRA       0x00FF0000
-/* ME CB Read Pointer HRA - host read only access to ME_CBRP */
-#define ME_CBRP_HRA       0x0000FF00
-/* ME Reset HRA - host read only access to ME_RST */
-#define ME_RST_HRA        0x00000010
-/* ME Ready HRA - host read only access to ME_RDY */
-#define ME_RDY_HRA        0x00000008
-/* ME Interrupt Generate HRA - host read only access to ME_IG */
-#define ME_IG_HRA         0x00000004
-/* ME Interrupt Status HRA - host read only access to ME_IS */
-#define ME_IS_HRA         0x00000002
-/* ME Interrupt Enable HRA - host read only access to ME_IE */
-#define ME_IE_HRA         0x00000001
 
 /*
  * MEI Version
@@ -224,6 +121,22 @@
 	u8 data[0];
 } __packed;
 
+/**
+ * struct mei_hbm_cl_cmd - client specific host bus command
+ *	CONNECT, DISCONNECT, and FLOW CONTROL
+ *
+ * @hbm_cmd - bus message command header
+ * @me_addr - address of the client in ME
+ * @host_addr - address of the client in the driver
+ * @data - command specific data
+ */
+struct mei_hbm_cl_cmd {
+	u8 hbm_cmd;
+	u8 me_addr;
+	u8 host_addr;
+	u8 data;
+};
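+/*
+ * Note: the client connect/disconnect requests and responses and the
+ * flow control message are assumed to share this layout in their first
+ * four bytes (command, ME address, host address, one command specific
+ * byte), which is what lets a single struct address any of them.
+ */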
+
 struct hbm_version {
 	u8 minor_version;
 	u8 major_version;
@@ -333,11 +246,5 @@
 	u8 reserved[MEI_FC_MESSAGE_RESERVED_LENGTH];
 } __packed;
 
-struct mei_me_client {
-	struct mei_client_properties props;
-	u8 client_id;
-	u8 mei_flow_ctrl_creds;
-} __packed;
-
 
 #endif
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index a54cd55..6ec5301 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -19,11 +19,11 @@
 #include <linux/wait.h>
 #include <linux/delay.h>
 
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
 #include <linux/mei.h>
 
+#include "mei_dev.h"
+#include "client.h"
+
 const char *mei_dev_state_str(int state)
 {
 #define MEI_DEV_STATE(state) case MEI_DEV_##state: return #state
@@ -42,84 +42,20 @@
 #undef MEI_DEV_STATE
 }
 
-
-
-/**
- * mei_io_list_flush - removes list entry belonging to cl.
- *
- * @list:  An instance of our list structure
- * @cl: private data of the file object
- */
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
+void mei_device_init(struct mei_device *dev)
 {
-	struct mei_cl_cb *pos;
-	struct mei_cl_cb *next;
-
-	list_for_each_entry_safe(pos, next, &list->list, list) {
-		if (pos->cl) {
-			if (mei_cl_cmp_id(cl, pos->cl))
-				list_del(&pos->list);
-		}
-	}
-}
-/**
- * mei_cl_flush_queues - flushes queue lists belonging to cl.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- */
-int mei_cl_flush_queues(struct mei_cl *cl)
-{
-	if (!cl || !cl->dev)
-		return -EINVAL;
-
-	dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
-	mei_io_list_flush(&cl->dev->read_list, cl);
-	mei_io_list_flush(&cl->dev->write_list, cl);
-	mei_io_list_flush(&cl->dev->write_waiting_list, cl);
-	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
-	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
-	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
-	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
-	return 0;
-}
-
-
-
-/**
- * init_mei_device - allocates and initializes the mei device structure
- *
- * @pdev: The pci device structure
- *
- * returns The mei_device_device pointer on success, NULL on failure.
- */
-struct mei_device *mei_device_init(struct pci_dev *pdev)
-{
-	struct mei_device *dev;
-
-	dev = kzalloc(sizeof(struct mei_device), GFP_KERNEL);
-	if (!dev)
-		return NULL;
-
 	/* setup our list array */
 	INIT_LIST_HEAD(&dev->file_list);
-	INIT_LIST_HEAD(&dev->wd_cl.link);
-	INIT_LIST_HEAD(&dev->iamthif_cl.link);
 	mutex_init(&dev->device_lock);
 	init_waitqueue_head(&dev->wait_recvd_msg);
 	init_waitqueue_head(&dev->wait_stop_wd);
 	dev->dev_state = MEI_DEV_INITIALIZING;
-	dev->iamthif_state = MEI_IAMTHIF_IDLE;
 
 	mei_io_list_init(&dev->read_list);
 	mei_io_list_init(&dev->write_list);
 	mei_io_list_init(&dev->write_waiting_list);
 	mei_io_list_init(&dev->ctrl_wr_list);
 	mei_io_list_init(&dev->ctrl_rd_list);
-	mei_io_list_init(&dev->amthif_cmd_list);
-	mei_io_list_init(&dev->amthif_rd_complete_list);
-	dev->pdev = pdev;
-	return dev;
 }
 
 /**
@@ -131,101 +67,64 @@
  */
 int mei_hw_init(struct mei_device *dev)
 {
-	int err = 0;
-	int ret;
+	int ret = 0;
 
 	mutex_lock(&dev->device_lock);
 
-	dev->host_hw_state = mei_hcsr_read(dev);
-	dev->me_hw_state = mei_mecsr_read(dev);
-	dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, mestate = 0x%08x.\n",
-	    dev->host_hw_state, dev->me_hw_state);
-
 	/* acknowledge interrupt and stop interrupts */
-	if ((dev->host_hw_state & H_IS) == H_IS)
-		mei_reg_write(dev, H_CSR, dev->host_hw_state);
+	mei_clear_interrupts(dev);
 
-	/* Doesn't change in runtime */
-	dev->hbuf_depth = (dev->host_hw_state & H_CBD) >> 24;
+	mei_hw_config(dev);
 
 	dev->recvd_msg = false;
 	dev_dbg(&dev->pdev->dev, "reset in start the mei device.\n");
 
 	mei_reset(dev, 1);
 
-	dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-	    dev->host_hw_state, dev->me_hw_state);
-
 	/* wait for ME to turn on ME_RDY */
 	if (!dev->recvd_msg) {
 		mutex_unlock(&dev->device_lock);
-		err = wait_event_interruptible_timeout(dev->wait_recvd_msg,
+		ret = wait_event_interruptible_timeout(dev->wait_recvd_msg,
 			dev->recvd_msg,
 			mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
 		mutex_lock(&dev->device_lock);
 	}
 
-	if (err <= 0 && !dev->recvd_msg) {
+	if (ret <= 0 && !dev->recvd_msg) {
 		dev->dev_state = MEI_DEV_DISABLED;
 		dev_dbg(&dev->pdev->dev,
 			"wait_event_interruptible_timeout failed"
 			"on wait for ME to turn on ME_RDY.\n");
-		ret = -ENODEV;
-		goto out;
+		goto err;
 	}
 
-	if (!(((dev->host_hw_state & H_RDY) == H_RDY) &&
-	      ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA))) {
-		dev->dev_state = MEI_DEV_DISABLED;
-		dev_dbg(&dev->pdev->dev,
-			"host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-			dev->host_hw_state, dev->me_hw_state);
 
-		if (!(dev->host_hw_state & H_RDY))
-			dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
+	if (!mei_host_is_ready(dev)) {
+		dev_err(&dev->pdev->dev, "host is not ready.\n");
+		goto err;
+	}
 
-		if (!(dev->me_hw_state & ME_RDY_HRA))
-			dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
-
-		dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
-		ret = -ENODEV;
-		goto out;
+	if (!mei_hw_is_ready(dev)) {
+		dev_err(&dev->pdev->dev, "ME is not ready.\n");
+		goto err;
 	}
 
 	if (dev->version.major_version != HBM_MAJOR_VERSION ||
 	    dev->version.minor_version != HBM_MINOR_VERSION) {
 		dev_dbg(&dev->pdev->dev, "MEI start failed.\n");
-		ret = -ENODEV;
-		goto out;
+		goto err;
 	}
 
 	dev->recvd_msg = false;
-	dev_dbg(&dev->pdev->dev, "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-	    dev->host_hw_state, dev->me_hw_state);
-	dev_dbg(&dev->pdev->dev, "ME turn on ME_RDY and host turn on H_RDY.\n");
 	dev_dbg(&dev->pdev->dev, "link layer has been established.\n");
-	dev_dbg(&dev->pdev->dev, "MEI  start success.\n");
-	ret = 0;
 
-out:
 	mutex_unlock(&dev->device_lock);
-	return ret;
-}
-
-/**
- * mei_hw_reset - resets fw via mei csr register.
- *
- * @dev: the device structure
- * @interrupts_enabled: if interrupt should be enabled after reset.
- */
-static void mei_hw_reset(struct mei_device *dev, int interrupts_enabled)
-{
-	dev->host_hw_state |= (H_RST | H_IG);
-
-	if (interrupts_enabled)
-		mei_enable_interrupts(dev);
-	else
-		mei_disable_interrupts(dev);
+	return 0;
+err:
+	dev_err(&dev->pdev->dev, "link layer initialization failed.\n");
+	dev->dev_state = MEI_DEV_DISABLED;
+	mutex_unlock(&dev->device_lock);
+	return -ENODEV;
 }
 
 /**
@@ -236,56 +135,34 @@
  */
 void mei_reset(struct mei_device *dev, int interrupts_enabled)
 {
-	struct mei_cl *cl_pos = NULL;
-	struct mei_cl *cl_next = NULL;
-	struct mei_cl_cb *cb_pos = NULL;
-	struct mei_cl_cb *cb_next = NULL;
 	bool unexpected;
 
-	if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
-		dev->need_reset = true;
+	if (dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET)
 		return;
-	}
 
 	unexpected = (dev->dev_state != MEI_DEV_INITIALIZING &&
 			dev->dev_state != MEI_DEV_DISABLED &&
 			dev->dev_state != MEI_DEV_POWER_DOWN &&
 			dev->dev_state != MEI_DEV_POWER_UP);
 
-	dev->host_hw_state = mei_hcsr_read(dev);
-
-	dev_dbg(&dev->pdev->dev, "before reset host_hw_state = 0x%08x.\n",
-	    dev->host_hw_state);
-
 	mei_hw_reset(dev, interrupts_enabled);
 
-	dev->host_hw_state &= ~H_RST;
-	dev->host_hw_state |= H_IG;
-
-	mei_hcsr_set(dev);
-
-	dev_dbg(&dev->pdev->dev, "currently saved host_hw_state = 0x%08x.\n",
-	    dev->host_hw_state);
-
-	dev->need_reset = false;
 
 	if (dev->dev_state != MEI_DEV_INITIALIZING) {
 		if (dev->dev_state != MEI_DEV_DISABLED &&
 		    dev->dev_state != MEI_DEV_POWER_DOWN)
 			dev->dev_state = MEI_DEV_RESETING;
 
-		list_for_each_entry_safe(cl_pos,
-				cl_next, &dev->file_list, link) {
-			cl_pos->state = MEI_FILE_DISCONNECTED;
-			cl_pos->mei_flow_ctrl_creds = 0;
-			cl_pos->read_cb = NULL;
-			cl_pos->timer_count = 0;
-		}
+		mei_cl_all_disconnect(dev);
+
 		/* remove entry if already in list */
 		dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
-		mei_me_cl_unlink(dev, &dev->wd_cl);
-
-		mei_me_cl_unlink(dev, &dev->iamthif_cl);
+		mei_cl_unlink(&dev->wd_cl);
+		if (dev->open_handle_count > 0)
+			dev->open_handle_count--;
+		mei_cl_unlink(&dev->iamthif_cl);
+		if (dev->open_handle_count > 0)
+			dev->open_handle_count--;
 
 		mei_amthif_reset_params(dev);
 		memset(&dev->wr_ext_msg, 0, sizeof(dev->wr_ext_msg));
@@ -295,392 +172,17 @@
 	dev->rd_msg_hdr = 0;
 	dev->wd_pending = false;
 
-	/* update the state of the registers after reset */
-	dev->host_hw_state = mei_hcsr_read(dev);
-	dev->me_hw_state = mei_mecsr_read(dev);
-
-	dev_dbg(&dev->pdev->dev, "after reset host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
-	    dev->host_hw_state, dev->me_hw_state);
-
 	if (unexpected)
 		dev_warn(&dev->pdev->dev, "unexpected reset: dev_state = %s\n",
 			 mei_dev_state_str(dev->dev_state));
 
-	/* Wake up all readings so they can be interrupted */
-	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
-		if (waitqueue_active(&cl_pos->rx_wait)) {
-			dev_dbg(&dev->pdev->dev, "Waking up client!\n");
-			wake_up_interruptible(&cl_pos->rx_wait);
-		}
-	}
+	/* wake up all readings so they can be interrupted */
+	mei_cl_all_read_wakeup(dev);
+
 	/* remove all waiting requests */
-	list_for_each_entry_safe(cb_pos, cb_next, &dev->write_list.list, list) {
-		list_del(&cb_pos->list);
-		mei_io_cb_free(cb_pos);
-	}
+	mei_cl_all_write_clear(dev);
 }
 
 
 
-/**
- * host_start_message - mei host sends start message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_start_message(struct mei_device *dev)
-{
-	struct mei_msg_hdr *mei_hdr;
-	struct hbm_host_version_request *start_req;
-	const size_t len = sizeof(struct hbm_host_version_request);
-
-	mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-	/* host start message */
-	start_req = (struct hbm_host_version_request *)&dev->wr_msg_buf[1];
-	memset(start_req, 0, len);
-	start_req->hbm_cmd = HOST_START_REQ_CMD;
-	start_req->host_version.major_version = HBM_MAJOR_VERSION;
-	start_req->host_version.minor_version = HBM_MINOR_VERSION;
-
-	dev->recvd_msg = false;
-	if (mei_write_message(dev, mei_hdr, (unsigned char *)start_req, len)) {
-		dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
-		dev->dev_state = MEI_DEV_RESETING;
-		mei_reset(dev, 1);
-	}
-	dev->init_clients_state = MEI_START_MESSAGE;
-	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
-	return ;
-}
-
-/**
- * host_enum_clients_message - host sends enumeration client request message.
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_host_enum_clients_message(struct mei_device *dev)
-{
-	struct mei_msg_hdr *mei_hdr;
-	struct hbm_host_enum_request *enum_req;
-	const size_t len = sizeof(struct hbm_host_enum_request);
-	/* enumerate clients */
-	mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-	enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
-	memset(enum_req, 0, sizeof(struct hbm_host_enum_request));
-	enum_req->hbm_cmd = HOST_ENUM_REQ_CMD;
-
-	if (mei_write_message(dev, mei_hdr, (unsigned char *)enum_req, len)) {
-		dev->dev_state = MEI_DEV_RESETING;
-		dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
-		mei_reset(dev, 1);
-	}
-	dev->init_clients_state = MEI_ENUM_CLIENTS_MESSAGE;
-	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
-	return;
-}
-
-
-/**
- * allocate_me_clients_storage - allocates storage for me clients
- *
- * @dev: the device structure
- *
- * returns none.
- */
-void mei_allocate_me_clients_storage(struct mei_device *dev)
-{
-	struct mei_me_client *clients;
-	int b;
-
-	/* count how many ME clients we have */
-	for_each_set_bit(b, dev->me_clients_map, MEI_CLIENTS_MAX)
-		dev->me_clients_num++;
-
-	if (dev->me_clients_num <= 0)
-		return ;
-
-
-	if (dev->me_clients != NULL) {
-		kfree(dev->me_clients);
-		dev->me_clients = NULL;
-	}
-	dev_dbg(&dev->pdev->dev, "memory allocation for ME clients size=%zd.\n",
-		dev->me_clients_num * sizeof(struct mei_me_client));
-	/* allocate storage for ME clients representation */
-	clients = kcalloc(dev->me_clients_num,
-			sizeof(struct mei_me_client), GFP_KERNEL);
-	if (!clients) {
-		dev_dbg(&dev->pdev->dev, "memory allocation for ME clients failed.\n");
-		dev->dev_state = MEI_DEV_RESETING;
-		mei_reset(dev, 1);
-		return ;
-	}
-	dev->me_clients = clients;
-	return ;
-}
-
-void mei_host_client_init(struct work_struct *work)
-{
-	struct mei_device *dev = container_of(work,
-					      struct mei_device, init_work);
-	struct mei_client_properties *client_props;
-	int i;
-
-	mutex_lock(&dev->device_lock);
-
-	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
-	dev->open_handle_count = 0;
-
-	/*
-	 * Reserving the first three client IDs
-	 * 0: Reserved for MEI Bus Message communications
-	 * 1: Reserved for Watchdog
-	 * 2: Reserved for AMTHI
-	 */
-	bitmap_set(dev->host_clients_map, 0, 3);
-
-	for (i = 0; i < dev->me_clients_num; i++) {
-		client_props = &dev->me_clients[i].props;
-
-		if (!uuid_le_cmp(client_props->protocol_name, mei_amthi_guid))
-			mei_amthif_host_init(dev);
-		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
-			mei_wd_host_init(dev);
-	}
-
-	dev->dev_state = MEI_DEV_ENABLED;
-
-	mutex_unlock(&dev->device_lock);
-}
-
-int mei_host_client_enumerate(struct mei_device *dev)
-{
-
-	struct mei_msg_hdr *mei_hdr;
-	struct hbm_props_request *prop_req;
-	const size_t len = sizeof(struct hbm_props_request);
-	unsigned long next_client_index;
-	u8 client_num;
-
-
-	client_num = dev->me_client_presentation_num;
-
-	next_client_index = find_next_bit(dev->me_clients_map, MEI_CLIENTS_MAX,
-					  dev->me_client_index);
-
-	/* We got all client properties */
-	if (next_client_index == MEI_CLIENTS_MAX) {
-		schedule_work(&dev->init_work);
-
-		return 0;
-	}
-
-	dev->me_clients[client_num].client_id = next_client_index;
-	dev->me_clients[client_num].mei_flow_ctrl_creds = 0;
-
-	mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-	prop_req = (struct hbm_props_request *)&dev->wr_msg_buf[1];
-
-	memset(prop_req, 0, sizeof(struct hbm_props_request));
-
-
-	prop_req->hbm_cmd = HOST_CLIENT_PROPERTIES_REQ_CMD;
-	prop_req->address = next_client_index;
-
-	if (mei_write_message(dev, mei_hdr, (unsigned char *) prop_req,
-			      mei_hdr->length)) {
-		dev->dev_state = MEI_DEV_RESETING;
-		dev_err(&dev->pdev->dev, "Properties request command failed\n");
-		mei_reset(dev, 1);
-
-		return -EIO;
-	}
-
-	dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
-	dev->me_client_index = next_client_index;
-
-	return 0;
-}
-
-/**
- * mei_init_file_private - initializes private file structure.
- *
- * @priv: private file structure to be initialized
- * @file: the file structure
- */
-void mei_cl_init(struct mei_cl *priv, struct mei_device *dev)
-{
-	memset(priv, 0, sizeof(struct mei_cl));
-	init_waitqueue_head(&priv->wait);
-	init_waitqueue_head(&priv->rx_wait);
-	init_waitqueue_head(&priv->tx_wait);
-	INIT_LIST_HEAD(&priv->link);
-	priv->reading_state = MEI_IDLE;
-	priv->writing_state = MEI_IDLE;
-	priv->dev = dev;
-}
-
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid)
-{
-	int i, res = -ENOENT;
-
-	for (i = 0; i < dev->me_clients_num; ++i)
-		if (uuid_le_cmp(*cuuid,
-				dev->me_clients[i].props.protocol_name) == 0) {
-			res = i;
-			break;
-		}
-
-	return res;
-}
-
-
-/**
- * mei_me_cl_link - create link between host and me clinet and add
- *   me_cl to the list
- *
- * @dev: the device structure
- * @cl: link between me and host client assocated with opened file descriptor
- * @cuuid: uuid of ME client
- * @client_id: id of the host client
- *
- * returns ME client index if ME client
- *	-EINVAL on incorrect values
- *	-ENONET if client not found
- */
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
-			const uuid_le *cuuid, u8 host_cl_id)
-{
-	int i;
-
-	if (!dev || !cl || !cuuid)
-		return -EINVAL;
-
-	/* check for valid client id */
-	i = mei_me_cl_by_uuid(dev, cuuid);
-	if (i >= 0) {
-		cl->me_client_id = dev->me_clients[i].client_id;
-		cl->state = MEI_FILE_CONNECTING;
-		cl->host_client_id = host_cl_id;
-
-		list_add_tail(&cl->link, &dev->file_list);
-		return (u8)i;
-	}
-
-	return -ENOENT;
-}
-/**
- * mei_me_cl_unlink - remove me_cl from the list
- *
- * @dev: the device structure
- * @host_client_id: host client id to be removed
- */
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl)
-{
-	struct mei_cl *pos, *next;
-	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
-		if (cl->host_client_id == pos->host_client_id) {
-			dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
-					pos->host_client_id, pos->me_client_id);
-			list_del_init(&pos->link);
-			break;
-		}
-	}
-}
-
-/**
- * mei_alloc_file_private - allocates a private file structure and sets it up.
- * @file: the file structure
- *
- * returns  The allocated file or NULL on failure
- */
-struct mei_cl *mei_cl_allocate(struct mei_device *dev)
-{
-	struct mei_cl *cl;
-
-	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
-	if (!cl)
-		return NULL;
-
-	mei_cl_init(cl, dev);
-
-	return cl;
-}
-
-
-
-/**
- * mei_disconnect_host_client - sends disconnect message to fw from host client.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl)
-{
-	struct mei_cl_cb *cb;
-	int rets, err;
-
-	if (!dev || !cl)
-		return -ENODEV;
-
-	if (cl->state != MEI_FILE_DISCONNECTING)
-		return 0;
-
-	cb = mei_io_cb_init(cl, NULL);
-	if (!cb)
-		return -ENOMEM;
-
-	cb->fop_type = MEI_FOP_CLOSE;
-	if (dev->mei_host_buffer_is_empty) {
-		dev->mei_host_buffer_is_empty = false;
-		if (mei_disconnect(dev, cl)) {
-			rets = -ENODEV;
-			dev_dbg(&dev->pdev->dev, "failed to call mei_disconnect.\n");
-			goto free;
-		}
-		mdelay(10); /* Wait for hardware disconnection ready */
-		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
-	} else {
-		dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
-		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-
-	}
-	mutex_unlock(&dev->device_lock);
-
-	err = wait_event_timeout(dev->wait_recvd_msg,
-			MEI_FILE_DISCONNECTED == cl->state,
-			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
-
-	mutex_lock(&dev->device_lock);
-	if (MEI_FILE_DISCONNECTED == cl->state) {
-		rets = 0;
-		dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
-	} else {
-		rets = -ENODEV;
-		if (MEI_FILE_DISCONNECTED != cl->state)
-			dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
-
-		if (err)
-			dev_dbg(&dev->pdev->dev,
-					"wait failed disconnect err=%08x\n",
-					err);
-
-		dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
-	}
-
-	mei_io_list_flush(&dev->ctrl_rd_list, cl);
-	mei_io_list_flush(&dev->ctrl_wr_list, cl);
-free:
-	mei_io_cb_free(cb);
-	return rets;
-}
 
diff --git a/drivers/misc/mei/interface.c b/drivers/misc/mei/interface.c
deleted file mode 100644
index 8de8547..0000000
--- a/drivers/misc/mei/interface.c
+++ /dev/null
@@ -1,388 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-#include <linux/pci.h>
-#include "mei_dev.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-
-
-/**
- * mei_set_csr_register - writes H_CSR register to the mei device,
- * and ignores the H_IS bit for it is write-one-to-zero.
- *
- * @dev: the device structure
- */
-void mei_hcsr_set(struct mei_device *dev)
-{
-	if ((dev->host_hw_state & H_IS) == H_IS)
-		dev->host_hw_state &= ~H_IS;
-	mei_reg_write(dev, H_CSR, dev->host_hw_state);
-	dev->host_hw_state = mei_hcsr_read(dev);
-}
-
-/**
- * mei_csr_enable_interrupts - enables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_enable_interrupts(struct mei_device *dev)
-{
-	dev->host_hw_state |= H_IE;
-	mei_hcsr_set(dev);
-}
-
-/**
- * mei_csr_disable_interrupts - disables mei device interrupts
- *
- * @dev: the device structure
- */
-void mei_disable_interrupts(struct mei_device *dev)
-{
-	dev->host_hw_state &= ~H_IE;
-	mei_hcsr_set(dev);
-}
-
-/**
- * mei_hbuf_filled_slots - gets number of device filled buffer slots
- *
- * @device: the device structure
- *
- * returns number of filled slots
- */
-static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
-{
-	char read_ptr, write_ptr;
-
-	dev->host_hw_state = mei_hcsr_read(dev);
-
-	read_ptr = (char) ((dev->host_hw_state & H_CBRP) >> 8);
-	write_ptr = (char) ((dev->host_hw_state & H_CBWP) >> 16);
-
-	return (unsigned char) (write_ptr - read_ptr);
-}
-
-/**
- * mei_hbuf_is_empty - checks if host buffer is empty.
- *
- * @dev: the device structure
- *
- * returns true if empty, false - otherwise.
- */
-bool mei_hbuf_is_empty(struct mei_device *dev)
-{
-	return mei_hbuf_filled_slots(dev) == 0;
-}
-
-/**
- * mei_hbuf_empty_slots - counts write empty slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise empty slots count
- */
-int mei_hbuf_empty_slots(struct mei_device *dev)
-{
-	unsigned char filled_slots, empty_slots;
-
-	filled_slots = mei_hbuf_filled_slots(dev);
-	empty_slots = dev->hbuf_depth - filled_slots;
-
-	/* check for overflow */
-	if (filled_slots > dev->hbuf_depth)
-		return -EOVERFLOW;
-
-	return empty_slots;
-}
-
-/**
- * mei_write_message - writes a message to mei device.
- *
- * @dev: the device structure
- * @header: header of message
- * @write_buffer: message buffer will be written
- * @write_length: message size will be written
- *
- * This function returns -EIO if write has failed
- */
-int mei_write_message(struct mei_device *dev, struct mei_msg_hdr *header,
-		      unsigned char *buf, unsigned long length)
-{
-	unsigned long rem, dw_cnt;
-	u32 *reg_buf = (u32 *)buf;
-	int i;
-	int empty_slots;
-
-
-	dev_dbg(&dev->pdev->dev,
-			"mei_write_message header=%08x.\n",
-			*((u32 *) header));
-
-	empty_slots = mei_hbuf_empty_slots(dev);
-	dev_dbg(&dev->pdev->dev, "empty slots = %hu.\n", empty_slots);
-
-	dw_cnt = mei_data2slots(length);
-	if (empty_slots < 0 || dw_cnt > empty_slots)
-		return -EIO;
-
-	mei_reg_write(dev, H_CB_WW, *((u32 *) header));
-
-	for (i = 0; i < length / 4; i++)
-		mei_reg_write(dev, H_CB_WW, reg_buf[i]);
-
-	rem = length & 0x3;
-	if (rem > 0) {
-		u32 reg = 0;
-		memcpy(&reg, &buf[length - rem], rem);
-		mei_reg_write(dev, H_CB_WW, reg);
-	}
-
-	dev->host_hw_state = mei_hcsr_read(dev);
-	dev->host_hw_state |= H_IG;
-	mei_hcsr_set(dev);
-	dev->me_hw_state = mei_mecsr_read(dev);
-	if ((dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA)
-		return -EIO;
-
-	return 0;
-}
-
-/**
- * mei_count_full_read_slots - counts read full slots.
- *
- * @dev: the device structure
- *
- * returns -1(ESLOTS_OVERFLOW) if overflow, otherwise filled slots count
- */
-int mei_count_full_read_slots(struct mei_device *dev)
-{
-	char read_ptr, write_ptr;
-	unsigned char buffer_depth, filled_slots;
-
-	dev->me_hw_state = mei_mecsr_read(dev);
-	buffer_depth = (unsigned char)((dev->me_hw_state & ME_CBD_HRA) >> 24);
-	read_ptr = (char) ((dev->me_hw_state & ME_CBRP_HRA) >> 8);
-	write_ptr = (char) ((dev->me_hw_state & ME_CBWP_HRA) >> 16);
-	filled_slots = (unsigned char) (write_ptr - read_ptr);
-
-	/* check for overflow */
-	if (filled_slots > buffer_depth)
-		return -EOVERFLOW;
-
-	dev_dbg(&dev->pdev->dev, "filled_slots =%08x\n", filled_slots);
-	return (int)filled_slots;
-}
-
-/**
- * mei_read_slots - reads a message from mei device.
- *
- * @dev: the device structure
- * @buffer: message buffer will be written
- * @buffer_length: message size will be read
- */
-void mei_read_slots(struct mei_device *dev, unsigned char *buffer,
-		    unsigned long buffer_length)
-{
-	u32 *reg_buf = (u32 *)buffer;
-
-	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
-		*reg_buf++ = mei_mecbrw_read(dev);
-
-	if (buffer_length > 0) {
-		u32 reg = mei_mecbrw_read(dev);
-		memcpy(reg_buf, &reg, buffer_length);
-	}
-
-	dev->host_hw_state |= H_IG;
-	mei_hcsr_set(dev);
-}
-
-/**
- * mei_flow_ctrl_creds - checks flow_control credentials.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
- *	-ENOENT if mei_cl is not present
- *	-EINVAL if single_recv_buf == 0
- */
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl)
-{
-	int i;
-
-	if (!dev->me_clients_num)
-		return 0;
-
-	if (cl->mei_flow_ctrl_creds > 0)
-		return 1;
-
-	for (i = 0; i < dev->me_clients_num; i++) {
-		struct mei_me_client  *me_cl = &dev->me_clients[i];
-		if (me_cl->client_id == cl->me_client_id) {
-			if (me_cl->mei_flow_ctrl_creds) {
-				if (WARN_ON(me_cl->props.single_recv_buf == 0))
-					return -EINVAL;
-				return 1;
-			} else {
-				return 0;
-			}
-		}
-	}
-	return -ENOENT;
-}
-
-/**
- * mei_flow_ctrl_reduce - reduces flow_control.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- * @returns
- *	0 on success
- *	-ENOENT when me client is not found
- *	-EINVAL when ctrl credits are <= 0
- */
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl)
-{
-	int i;
-
-	if (!dev->me_clients_num)
-		return -ENOENT;
-
-	for (i = 0; i < dev->me_clients_num; i++) {
-		struct mei_me_client  *me_cl = &dev->me_clients[i];
-		if (me_cl->client_id == cl->me_client_id) {
-			if (me_cl->props.single_recv_buf != 0) {
-				if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
-					return -EINVAL;
-				dev->me_clients[i].mei_flow_ctrl_creds--;
-			} else {
-				if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
-					return -EINVAL;
-				cl->mei_flow_ctrl_creds--;
-			}
-			return 0;
-		}
-	}
-	return -ENOENT;
-}
-
-/**
- * mei_send_flow_control - sends flow control to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl)
-{
-	struct mei_msg_hdr *mei_hdr;
-	struct hbm_flow_control *flow_ctrl;
-	const size_t len = sizeof(struct hbm_flow_control);
-
-	mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-	flow_ctrl = (struct hbm_flow_control *)&dev->wr_msg_buf[1];
-	memset(flow_ctrl, 0, len);
-	flow_ctrl->hbm_cmd = MEI_FLOW_CONTROL_CMD;
-	flow_ctrl->host_addr = cl->host_client_id;
-	flow_ctrl->me_addr = cl->me_client_id;
-	/* FIXME: reserved !? */
-	memset(flow_ctrl->reserved, 0, sizeof(flow_ctrl->reserved));
-	dev_dbg(&dev->pdev->dev, "sending flow control host client = %d, ME client = %d\n",
-		cl->host_client_id, cl->me_client_id);
-
-	return mei_write_message(dev, mei_hdr,
-			(unsigned char *) flow_ctrl, len);
-}
-
-/**
- * mei_other_client_is_connecting - checks if other
- *    client with the same client id is connected.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * returns 1 if other client is connected, 0 - otherwise.
- */
-int mei_other_client_is_connecting(struct mei_device *dev,
-				struct mei_cl *cl)
-{
-	struct mei_cl *cl_pos = NULL;
-	struct mei_cl *cl_next = NULL;
-
-	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
-		if ((cl_pos->state == MEI_FILE_CONNECTING) &&
-			(cl_pos != cl) &&
-			cl->me_client_id == cl_pos->me_client_id)
-			return 1;
-
-	}
-	return 0;
-}
-
-/**
- * mei_disconnect - sends disconnect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl)
-{
-	struct mei_msg_hdr *mei_hdr;
-	struct hbm_client_connect_request *req;
-	const size_t len = sizeof(struct hbm_client_connect_request);
-
-	mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-	req = (struct hbm_client_connect_request *)&dev->wr_msg_buf[1];
-	memset(req, 0, len);
-	req->hbm_cmd = CLIENT_DISCONNECT_REQ_CMD;
-	req->host_addr = cl->host_client_id;
-	req->me_addr = cl->me_client_id;
-	req->reserved = 0;
-
-	return mei_write_message(dev, mei_hdr, (unsigned char *)req, len);
-}
-
-/**
- * mei_connect - sends connect message to fw.
- *
- * @dev: the device structure
- * @cl: private data of the file object
- *
- * This function returns -EIO on write failure
- */
-int mei_connect(struct mei_device *dev, struct mei_cl *cl)
-{
-	struct mei_msg_hdr *mei_hdr;
-	struct hbm_client_connect_request *req;
-	const size_t len = sizeof(struct hbm_client_connect_request);
-
-	mei_hdr = mei_hbm_hdr(&dev->wr_msg_buf[0], len);
-
-	req = (struct hbm_client_connect_request *) &dev->wr_msg_buf[1];
-	req->hbm_cmd = CLIENT_CONNECT_REQ_CMD;
-	req->host_addr = cl->host_client_id;
-	req->me_addr = cl->me_client_id;
-	req->reserved = 0;
-
-	return mei_write_message(dev, mei_hdr, (unsigned char *) req, len);
-}
diff --git a/drivers/misc/mei/interface.h b/drivers/misc/mei/interface.h
deleted file mode 100644
index ec6c785..0000000
--- a/drivers/misc/mei/interface.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-
-
-#ifndef _MEI_INTERFACE_H_
-#define _MEI_INTERFACE_H_
-
-#include <linux/mei.h>
-#include "mei_dev.h"
-
-
-
-void mei_read_slots(struct mei_device *dev,
-		     unsigned char *buffer,
-		     unsigned long buffer_length);
-
-int mei_write_message(struct mei_device *dev,
-			     struct mei_msg_hdr *header,
-			     unsigned char *write_buffer,
-			     unsigned long write_length);
-
-bool mei_hbuf_is_empty(struct mei_device *dev);
-
-int mei_hbuf_empty_slots(struct mei_device *dev);
-
-static inline size_t mei_hbuf_max_data(const struct mei_device *dev)
-{
-	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
-}
-
-/* get slots (dwords) from a message length + header (bytes) */
-static inline unsigned char mei_data2slots(size_t length)
-{
-	return DIV_ROUND_UP(sizeof(struct mei_msg_hdr) + length, 4);
-}
-
-int mei_count_full_read_slots(struct mei_device *dev);
-
-
-int mei_flow_ctrl_creds(struct mei_device *dev, struct mei_cl *cl);
-
-
-
-int mei_wd_send(struct mei_device *dev);
-int mei_wd_stop(struct mei_device *dev);
-int mei_wd_host_init(struct mei_device *dev);
-/*
- * mei_watchdog_register  - Registering watchdog interface
- *   once we got connection to the WD Client
- * @dev - mei device
- */
-void mei_watchdog_register(struct mei_device *dev);
-/*
- * mei_watchdog_unregister  - Unregistering watchdog interface
- * @dev - mei device
- */
-void mei_watchdog_unregister(struct mei_device *dev);
-
-int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_send_flow_control(struct mei_device *dev, struct mei_cl *cl);
-
-int mei_disconnect(struct mei_device *dev, struct mei_cl *cl);
-int mei_other_client_is_connecting(struct mei_device *dev, struct mei_cl *cl);
-int mei_connect(struct mei_device *dev, struct mei_cl *cl);
-
-#endif /* _MEI_INTERFACE_H_ */
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 04fa213..3535b26 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -21,41 +21,21 @@
 #include <linux/fs.h>
 #include <linux/jiffies.h>
 
-#include "mei_dev.h"
 #include <linux/mei.h>
-#include "hw.h"
-#include "interface.h"
+
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
 
 
 /**
- * mei_interrupt_quick_handler - The ISR of the MEI device
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- */
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id)
-{
-	struct mei_device *dev = (struct mei_device *) dev_id;
-	u32 csr_reg = mei_hcsr_read(dev);
-
-	if ((csr_reg & H_IS) != H_IS)
-		return IRQ_NONE;
-
-	/* clear H_IS bit in H_CSR */
-	mei_reg_write(dev, H_CSR, csr_reg);
-
-	return IRQ_WAKE_THREAD;
-}
-
-/**
- * _mei_cmpl - processes completed operation.
+ * mei_irq_complete_handler - processes completed operation.
  *
  * @cl: private data of the file object.
  * @cb_pos: callback block.
  */
-static void _mei_cmpl(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos)
 {
 	if (cb_pos->fop_type == MEI_FOP_WRITE) {
 		mei_io_cb_free(cb_pos);
@@ -150,8 +130,8 @@
 	dev_dbg(&dev->pdev->dev, "message read\n");
 	if (!buffer) {
 		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
-		dev_dbg(&dev->pdev->dev, "discarding message, header =%08x.\n",
-				*(u32 *) dev->rd_msg_buf);
+		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
+				MEI_HDR_PRM(mei_hdr));
 	}
 
 	return 0;
@@ -179,7 +159,7 @@
 
 	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
 
-	if (mei_disconnect(dev, cl)) {
+	if (mei_hbm_cl_disconnect_req(dev, cl)) {
 		cl->status = 0;
 		cb_pos->buf_idx = 0;
 		list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -195,440 +175,6 @@
 	return 0;
 }
 
-/**
- * is_treat_specially_client - checks if the message belongs
- * to the file private data.
- *
- * @cl: private data of the file object
- * @rs: connect response bus message
- *
- */
-static bool is_treat_specially_client(struct mei_cl *cl,
-		struct hbm_client_connect_response *rs)
-{
-
-	if (cl->host_client_id == rs->host_addr &&
-	    cl->me_client_id == rs->me_addr) {
-		if (!rs->status) {
-			cl->state = MEI_FILE_CONNECTED;
-			cl->status = 0;
-
-		} else {
-			cl->state = MEI_FILE_DISCONNECTED;
-			cl->status = -ENODEV;
-		}
-		cl->timer_count = 0;
-
-		return true;
-	}
-	return false;
-}
-
-/**
- * mei_client_connect_response - connects to response irq routine
- *
- * @dev: the device structure
- * @rs: connect response bus message
- */
-static void mei_client_connect_response(struct mei_device *dev,
-		struct hbm_client_connect_response *rs)
-{
-
-	struct mei_cl *cl;
-	struct mei_cl_cb *pos = NULL, *next = NULL;
-
-	dev_dbg(&dev->pdev->dev,
-			"connect_response:\n"
-			"ME Client = %d\n"
-			"Host Client = %d\n"
-			"Status = %d\n",
-			rs->me_addr,
-			rs->host_addr,
-			rs->status);
-
-	/* if WD or iamthif client treat specially */
-
-	if (is_treat_specially_client(&(dev->wd_cl), rs)) {
-		dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
-		mei_watchdog_register(dev);
-
-		return;
-	}
-
-	if (is_treat_specially_client(&(dev->iamthif_cl), rs)) {
-		dev->iamthif_state = MEI_IAMTHIF_IDLE;
-		return;
-	}
-	list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
-
-		cl = pos->cl;
-		if (!cl) {
-			list_del(&pos->list);
-			return;
-		}
-		if (pos->fop_type == MEI_FOP_IOCTL) {
-			if (is_treat_specially_client(cl, rs)) {
-				list_del(&pos->list);
-				cl->status = 0;
-				cl->timer_count = 0;
-				break;
-			}
-		}
-	}
-}
-
-/**
- * mei_client_disconnect_response - disconnects from response irq routine
- *
- * @dev: the device structure
- * @rs: disconnect response bus message
- */
-static void mei_client_disconnect_response(struct mei_device *dev,
-					struct hbm_client_connect_response *rs)
-{
-	struct mei_cl *cl;
-	struct mei_cl_cb *pos = NULL, *next = NULL;
-
-	dev_dbg(&dev->pdev->dev,
-			"disconnect_response:\n"
-			"ME Client = %d\n"
-			"Host Client = %d\n"
-			"Status = %d\n",
-			rs->me_addr,
-			rs->host_addr,
-			rs->status);
-
-	list_for_each_entry_safe(pos, next, &dev->ctrl_rd_list.list, list) {
-		cl = pos->cl;
-
-		if (!cl) {
-			list_del(&pos->list);
-			return;
-		}
-
-		dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
-		if (cl->host_client_id == rs->host_addr &&
-		    cl->me_client_id == rs->me_addr) {
-
-			list_del(&pos->list);
-			if (!rs->status)
-				cl->state = MEI_FILE_DISCONNECTED;
-
-			cl->status = 0;
-			cl->timer_count = 0;
-			break;
-		}
-	}
-}
-
-/**
- * same_flow_addr - tells if they have the same address.
- *
- * @file: private data of the file object.
- * @flow: flow control.
- *
- * returns  !=0, same; 0,not.
- */
-static int same_flow_addr(struct mei_cl *cl, struct hbm_flow_control *flow)
-{
-	return (cl->host_client_id == flow->host_addr &&
-		cl->me_client_id == flow->me_addr);
-}
-
-/**
- * add_single_flow_creds - adds single buffer credentials.
- *
- * @file: private data ot the file object.
- * @flow: flow control.
- */
-static void add_single_flow_creds(struct mei_device *dev,
-				  struct hbm_flow_control *flow)
-{
-	struct mei_me_client *client;
-	int i;
-
-	for (i = 0; i < dev->me_clients_num; i++) {
-		client = &dev->me_clients[i];
-		if (client && flow->me_addr == client->client_id) {
-			if (client->props.single_recv_buf) {
-				client->mei_flow_ctrl_creds++;
-				dev_dbg(&dev->pdev->dev, "recv flow ctrl msg ME %d (single).\n",
-				    flow->me_addr);
-				dev_dbg(&dev->pdev->dev, "flow control credentials =%d.\n",
-				    client->mei_flow_ctrl_creds);
-			} else {
-				BUG();	/* error in flow control */
-			}
-		}
-	}
-}
-
-/**
- * mei_client_flow_control_response - flow control response irq routine
- *
- * @dev: the device structure
- * @flow_control: flow control response bus message
- */
-static void mei_client_flow_control_response(struct mei_device *dev,
-		struct hbm_flow_control *flow_control)
-{
-	struct mei_cl *cl_pos = NULL;
-	struct mei_cl *cl_next = NULL;
-
-	if (!flow_control->host_addr) {
-		/* single receive buffer */
-		add_single_flow_creds(dev, flow_control);
-	} else {
-		/* normal connection */
-		list_for_each_entry_safe(cl_pos, cl_next,
-				&dev->file_list, link) {
-			dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in file_list\n");
-
-			dev_dbg(&dev->pdev->dev, "cl of host client %d ME client %d.\n",
-			    cl_pos->host_client_id,
-			    cl_pos->me_client_id);
-			dev_dbg(&dev->pdev->dev, "flow ctrl msg for host %d ME %d.\n",
-			    flow_control->host_addr,
-			    flow_control->me_addr);
-			if (same_flow_addr(cl_pos, flow_control)) {
-				dev_dbg(&dev->pdev->dev, "recv ctrl msg for host  %d ME %d.\n",
-				    flow_control->host_addr,
-				    flow_control->me_addr);
-				cl_pos->mei_flow_ctrl_creds++;
-				dev_dbg(&dev->pdev->dev, "flow control credentials = %d.\n",
-				    cl_pos->mei_flow_ctrl_creds);
-				break;
-			}
-		}
-	}
-}
-
-/**
- * same_disconn_addr - tells if they have the same address
- *
- * @file: private data of the file object.
- * @disconn: disconnection request.
- *
- * returns !=0, same; 0,not.
- */
-static int same_disconn_addr(struct mei_cl *cl,
-			     struct hbm_client_connect_request *req)
-{
-	return (cl->host_client_id == req->host_addr &&
-		cl->me_client_id == req->me_addr);
-}
-
-/**
- * mei_client_disconnect_request - disconnects from request irq routine
- *
- * @dev: the device structure.
- * @disconnect_req: disconnect request bus message.
- */
-static void mei_client_disconnect_request(struct mei_device *dev,
-		struct hbm_client_connect_request *disconnect_req)
-{
-	struct hbm_client_connect_response *disconnect_res;
-	struct mei_cl *pos, *next;
-	const size_t len = sizeof(struct hbm_client_connect_response);
-
-	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
-		if (same_disconn_addr(pos, disconnect_req)) {
-			dev_dbg(&dev->pdev->dev, "disconnect request host client %d ME client %d.\n",
-					disconnect_req->host_addr,
-					disconnect_req->me_addr);
-			pos->state = MEI_FILE_DISCONNECTED;
-			pos->timer_count = 0;
-			if (pos == &dev->wd_cl)
-				dev->wd_pending = false;
-			else if (pos == &dev->iamthif_cl)
-				dev->iamthif_timer = 0;
-
-			/* prepare disconnect response */
-			(void)mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
-			disconnect_res =
-				(struct hbm_client_connect_response *)
-				&dev->wr_ext_msg.data;
-			disconnect_res->hbm_cmd = CLIENT_DISCONNECT_RES_CMD;
-			disconnect_res->host_addr = pos->host_client_id;
-			disconnect_res->me_addr = pos->me_client_id;
-			disconnect_res->status = 0;
-			break;
-		}
-	}
-}
-
-/**
- * mei_irq_thread_read_bus_message - bottom half read routine after ISR to
- * handle the read bus message cmd processing.
- *
- * @dev: the device structure
- * @mei_hdr: header of bus message
- */
-static void mei_irq_thread_read_bus_message(struct mei_device *dev,
-		struct mei_msg_hdr *mei_hdr)
-{
-	struct mei_bus_message *mei_msg;
-	struct mei_me_client *me_client;
-	struct hbm_host_version_response *version_res;
-	struct hbm_client_connect_response *connect_res;
-	struct hbm_client_connect_response *disconnect_res;
-	struct hbm_client_connect_request *disconnect_req;
-	struct hbm_flow_control *flow_control;
-	struct hbm_props_response *props_res;
-	struct hbm_host_enum_response *enum_res;
-	struct hbm_host_stop_request *stop_req;
-
-	/* read the message to our buffer */
-	BUG_ON(mei_hdr->length >= sizeof(dev->rd_msg_buf));
-	mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
-	mei_msg = (struct mei_bus_message *)dev->rd_msg_buf;
-
-	switch (mei_msg->hbm_cmd) {
-	case HOST_START_RES_CMD:
-		version_res = (struct hbm_host_version_response *) mei_msg;
-		if (version_res->host_version_supported) {
-			dev->version.major_version = HBM_MAJOR_VERSION;
-			dev->version.minor_version = HBM_MINOR_VERSION;
-			if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
-			    dev->init_clients_state == MEI_START_MESSAGE) {
-				dev->init_clients_timer = 0;
-				mei_host_enum_clients_message(dev);
-			} else {
-				dev->recvd_msg = false;
-				dev_dbg(&dev->pdev->dev, "IMEI reset due to received host start response bus message.\n");
-				mei_reset(dev, 1);
-				return;
-			}
-		} else {
-			u32 *buf = dev->wr_msg_buf;
-			const size_t len = sizeof(struct hbm_host_stop_request);
-
-			dev->version = version_res->me_max_version;
-
-			/* send stop message */
-			mei_hdr = mei_hbm_hdr(&buf[0], len);
-			stop_req = (struct hbm_host_stop_request *)&buf[1];
-			memset(stop_req, 0, len);
-			stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
-			stop_req->reason = DRIVER_STOP_REQUEST;
-
-			mei_write_message(dev, mei_hdr,
-					(unsigned char *)stop_req, len);
-			dev_dbg(&dev->pdev->dev, "version mismatch.\n");
-			return;
-		}
-
-		dev->recvd_msg = true;
-		dev_dbg(&dev->pdev->dev, "host start response message received.\n");
-		break;
-
-	case CLIENT_CONNECT_RES_CMD:
-		connect_res = (struct hbm_client_connect_response *) mei_msg;
-		mei_client_connect_response(dev, connect_res);
-		dev_dbg(&dev->pdev->dev, "client connect response message received.\n");
-		wake_up(&dev->wait_recvd_msg);
-		break;
-
-	case CLIENT_DISCONNECT_RES_CMD:
-		disconnect_res = (struct hbm_client_connect_response *) mei_msg;
-		mei_client_disconnect_response(dev, disconnect_res);
-		dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
-		wake_up(&dev->wait_recvd_msg);
-		break;
-
-	case MEI_FLOW_CONTROL_CMD:
-		flow_control = (struct hbm_flow_control *) mei_msg;
-		mei_client_flow_control_response(dev, flow_control);
-		dev_dbg(&dev->pdev->dev, "client flow control response message received.\n");
-		break;
-
-	case HOST_CLIENT_PROPERTIES_RES_CMD:
-		props_res = (struct hbm_props_response *)mei_msg;
-		me_client = &dev->me_clients[dev->me_client_presentation_num];
-
-		if (props_res->status || !dev->me_clients) {
-			dev_dbg(&dev->pdev->dev, "reset due to received host client properties response bus message wrong status.\n");
-			mei_reset(dev, 1);
-			return;
-		}
-
-		if (me_client->client_id != props_res->address) {
-			dev_err(&dev->pdev->dev,
-				"Host client properties reply mismatch\n");
-			mei_reset(dev, 1);
-
-			return;
-		}
-
-		if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
-		    dev->init_clients_state != MEI_CLIENT_PROPERTIES_MESSAGE) {
-			dev_err(&dev->pdev->dev,
-				"Unexpected client properties reply\n");
-			mei_reset(dev, 1);
-
-			return;
-		}
-
-		me_client->props = props_res->client_properties;
-		dev->me_client_index++;
-		dev->me_client_presentation_num++;
-
-		mei_host_client_enumerate(dev);
-
-		break;
-
-	case HOST_ENUM_RES_CMD:
-		enum_res = (struct hbm_host_enum_response *) mei_msg;
-		memcpy(dev->me_clients_map, enum_res->valid_addresses, 32);
-		if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
-		    dev->init_clients_state == MEI_ENUM_CLIENTS_MESSAGE) {
-				dev->init_clients_timer = 0;
-				dev->me_client_presentation_num = 0;
-				dev->me_client_index = 0;
-				mei_allocate_me_clients_storage(dev);
-				dev->init_clients_state =
-					MEI_CLIENT_PROPERTIES_MESSAGE;
-
-				mei_host_client_enumerate(dev);
-		} else {
-			dev_dbg(&dev->pdev->dev, "reset due to received host enumeration clients response bus message.\n");
-			mei_reset(dev, 1);
-			return;
-		}
-		break;
-
-	case HOST_STOP_RES_CMD:
-		dev->dev_state = MEI_DEV_DISABLED;
-		dev_dbg(&dev->pdev->dev, "resetting because of FW stop response.\n");
-		mei_reset(dev, 1);
-		break;
-
-	case CLIENT_DISCONNECT_REQ_CMD:
-		/* search for client */
-		disconnect_req = (struct hbm_client_connect_request *)mei_msg;
-		mei_client_disconnect_request(dev, disconnect_req);
-		break;
-
-	case ME_STOP_REQ_CMD:
-	{
-		/* prepare stop request: sent in next interrupt event */
-
-		const size_t len = sizeof(struct hbm_host_stop_request);
-
-		mei_hdr = mei_hbm_hdr((u32 *)&dev->wr_ext_msg.hdr, len);
-		stop_req = (struct hbm_host_stop_request *)&dev->wr_ext_msg.data;
-		memset(stop_req, 0, len);
-		stop_req->hbm_cmd = HOST_STOP_REQ_CMD;
-		stop_req->reason = DRIVER_STOP_REQUEST;
-		break;
-	}
-	default:
-		BUG();
-		break;
-
-	}
-}
-
 
 /**
  * _mei_hb_read - processes read related operation.
@@ -655,7 +201,7 @@
 
 	*slots -= mei_data2slots(sizeof(struct hbm_flow_control));
 
-	if (mei_send_flow_control(dev, cl)) {
+	if (mei_hbm_cl_flow_control_req(dev, cl)) {
 		cl->status = -ENODEV;
 		cb_pos->buf_idx = 0;
 		list_move_tail(&cb_pos->list, &cmpl_list->list);
@@ -691,8 +237,8 @@
 	}
 
 	cl->state = MEI_FILE_CONNECTING;
-	 *slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
-	if (mei_connect(dev, cl)) {
+	*slots -= mei_data2slots(sizeof(struct hbm_client_connect_request));
+	if (mei_hbm_cl_connect_req(dev, cl)) {
 		cl->status = -ENODEV;
 		cb_pos->buf_idx = 0;
 		list_del(&cb_pos->list);
@@ -717,25 +263,24 @@
 static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
 			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
 {
-	struct mei_msg_hdr *mei_hdr;
+	struct mei_msg_hdr mei_hdr;
 	struct mei_cl *cl = cb->cl;
 	size_t len = cb->request_buffer.size - cb->buf_idx;
 	size_t msg_slots = mei_data2slots(len);
 
-	mei_hdr = (struct mei_msg_hdr *)&dev->wr_msg_buf[0];
-	mei_hdr->host_addr = cl->host_client_id;
-	mei_hdr->me_addr = cl->me_client_id;
-	mei_hdr->reserved = 0;
+	mei_hdr.host_addr = cl->host_client_id;
+	mei_hdr.me_addr = cl->me_client_id;
+	mei_hdr.reserved = 0;
 
 	if (*slots >= msg_slots) {
-		mei_hdr->length = len;
-		mei_hdr->msg_complete = 1;
+		mei_hdr.length = len;
+		mei_hdr.msg_complete = 1;
 	/* Split the message only if we can write the whole host buffer */
 	} else if (*slots == dev->hbuf_depth) {
 		msg_slots = *slots;
 		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
-		mei_hdr->length = len;
-		mei_hdr->msg_complete = 0;
+		mei_hdr.length = len;
+		mei_hdr.msg_complete = 0;
 	} else {
 		/* wait for next time the host buffer is empty */
 		return 0;
@@ -743,23 +288,22 @@
 
 	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
 			cb->request_buffer.size, cb->buf_idx);
-	dev_dbg(&dev->pdev->dev, "msg: len = %d complete = %d\n",
-			mei_hdr->length, mei_hdr->msg_complete);
+	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
 
 	*slots -=  msg_slots;
-	if (mei_write_message(dev, mei_hdr,
-		cb->request_buffer.data + cb->buf_idx, len)) {
+	if (mei_write_message(dev, &mei_hdr,
+			cb->request_buffer.data + cb->buf_idx)) {
 		cl->status = -ENODEV;
 		list_move_tail(&cb->list, &cmpl_list->list);
 		return -ENODEV;
 	}
 
-	if (mei_flow_ctrl_reduce(dev, cl))
+	if (mei_cl_flow_ctrl_reduce(cl))
 		return -ENODEV;
 
 	cl->status = 0;
-	cb->buf_idx += mei_hdr->length;
-	if (mei_hdr->msg_complete)
+	cb->buf_idx += mei_hdr.length;
+	if (mei_hdr.msg_complete)
 		list_move_tail(&cb->list, &dev->write_waiting_list.list);
 
 	return 0;
@@ -769,15 +313,14 @@
  * mei_irq_thread_read_handler - bottom half read routine after ISR to
  * handle the read processing.
  *
- * @cmpl_list: An instance of our list structure
  * @dev: the device structure
+ * @cmpl_list: An instance of our list structure
  * @slots: slots to read.
  *
  * returns 0 on success, <0 on failure.
  */
-static int mei_irq_thread_read_handler(struct mei_cl_cb *cmpl_list,
-		struct mei_device *dev,
-		s32 *slots)
+int mei_irq_read_handler(struct mei_device *dev,
+		struct mei_cl_cb *cmpl_list, s32 *slots)
 {
 	struct mei_msg_hdr *mei_hdr;
 	struct mei_cl *cl_pos = NULL;
@@ -785,13 +328,13 @@
 	int ret = 0;
 
 	if (!dev->rd_msg_hdr) {
-		dev->rd_msg_hdr = mei_mecbrw_read(dev);
+		dev->rd_msg_hdr = mei_read_hdr(dev);
 		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
 		(*slots)--;
 		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
 	}
 	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
-	dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n", mei_hdr->length);
+	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
 
 	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
 		dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
@@ -830,19 +373,18 @@
 	/* decide where to read the message too */
 	if (!mei_hdr->host_addr) {
 		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
-		mei_irq_thread_read_bus_message(dev, mei_hdr);
+		mei_hbm_dispatch(dev, mei_hdr);
 		dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
 	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
 		   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
 		   (dev->iamthif_state == MEI_IAMTHIF_READING)) {
 		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
-		dev_dbg(&dev->pdev->dev, "mei_hdr->length =%d\n",
-				mei_hdr->length);
+
+		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
 
 		ret = mei_amthif_irq_read_message(cmpl_list, dev, mei_hdr);
 		if (ret)
 			goto end;
-
 	} else {
 		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_client_message.\n");
 		ret = mei_irq_thread_read_client_message(cmpl_list,
@@ -869,15 +411,15 @@
 
 
 /**
- * mei_irq_thread_write_handler - bottom half write routine after
- * ISR to handle the write processing.
+ * mei_irq_write_handler - dispatch write requests after an interrupt is received
  *
  * @dev: the device structure
  * @cmpl_list: An instance of our list structure
  *
  * returns 0 on success, <0 on failure.
  */
-static int mei_irq_thread_write_handler(struct mei_device *dev,
+int mei_irq_write_handler(struct mei_device *dev,
 				struct mei_cl_cb *cmpl_list)
 {
 
@@ -887,7 +429,7 @@
 	s32 slots;
 	int ret;
 
-	if (!mei_hbuf_is_empty(dev)) {
+	if (!mei_hbuf_is_ready(dev)) {
 		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
 		return 0;
 	}
@@ -930,16 +472,16 @@
 
 	if (dev->wr_ext_msg.hdr.length) {
 		mei_write_message(dev, &dev->wr_ext_msg.hdr,
-			dev->wr_ext_msg.data, dev->wr_ext_msg.hdr.length);
+				dev->wr_ext_msg.data);
 		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
 		dev->wr_ext_msg.hdr.length = 0;
 	}
 	if (dev->dev_state == MEI_DEV_ENABLED) {
 		if (dev->wd_pending &&
-		    mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
 			if (mei_wd_send(dev))
 				dev_dbg(&dev->pdev->dev, "wd send failed.\n");
-			else if (mei_flow_ctrl_reduce(dev, &dev->wd_cl))
+			else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
 				return -ENODEV;
 
 			dev->wd_pending = false;
@@ -978,7 +520,7 @@
 			break;
 		case MEI_FOP_IOCTL:
 			/* connect message */
-			if (mei_other_client_is_connecting(dev, cl))
+			if (mei_cl_is_other_connecting(cl))
 				continue;
 			ret = _mei_irq_thread_ioctl(dev, &slots, pos,
 						cl, cmpl_list);
@@ -998,7 +540,7 @@
 		cl = pos->cl;
 		if (cl == NULL)
 			continue;
-		if (mei_flow_ctrl_creds(dev, cl) <= 0) {
+		if (mei_cl_flow_ctrl_creds(cl) <= 0) {
 			dev_dbg(&dev->pdev->dev,
 				"No flow control credentials for client %d, not sending.\n",
 				cl->host_client_id);
@@ -1123,115 +665,3 @@
 	mutex_unlock(&dev->device_lock);
 }
 
-/**
- *  mei_interrupt_thread_handler - function called after ISR to handle the interrupt
- * processing.
- *
- * @irq: The irq number
- * @dev_id: pointer to the device structure
- *
- * returns irqreturn_t
- *
- */
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id)
-{
-	struct mei_device *dev = (struct mei_device *) dev_id;
-	struct mei_cl_cb complete_list;
-	struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
-	struct mei_cl *cl;
-	s32 slots;
-	int rets;
-	bool  bus_message_received;
-
-
-	dev_dbg(&dev->pdev->dev, "function called after ISR to handle the interrupt processing.\n");
-	/* initialize our complete list */
-	mutex_lock(&dev->device_lock);
-	mei_io_list_init(&complete_list);
-	dev->host_hw_state = mei_hcsr_read(dev);
-
-	/* Ack the interrupt here
-	 * In case of MSI we don't go through the quick handler */
-	if (pci_dev_msi_enabled(dev->pdev))
-		mei_reg_write(dev, H_CSR, dev->host_hw_state);
-
-	dev->me_hw_state = mei_mecsr_read(dev);
-
-	/* check if ME wants a reset */
-	if ((dev->me_hw_state & ME_RDY_HRA) == 0 &&
-	    dev->dev_state != MEI_DEV_RESETING &&
-	    dev->dev_state != MEI_DEV_INITIALIZING) {
-		dev_dbg(&dev->pdev->dev, "FW not ready.\n");
-		mei_reset(dev, 1);
-		mutex_unlock(&dev->device_lock);
-		return IRQ_HANDLED;
-	}
-
-	/*  check if we need to start the dev */
-	if ((dev->host_hw_state & H_RDY) == 0) {
-		if ((dev->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA) {
-			dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
-			dev->host_hw_state |= (H_IE | H_IG | H_RDY);
-			mei_hcsr_set(dev);
-			dev->dev_state = MEI_DEV_INIT_CLIENTS;
-			dev_dbg(&dev->pdev->dev, "link is established start sending messages.\n");
-			/* link is established
-			 * start sending messages.
-			 */
-			mei_host_start_message(dev);
-			mutex_unlock(&dev->device_lock);
-			return IRQ_HANDLED;
-		} else {
-			dev_dbg(&dev->pdev->dev, "FW not ready.\n");
-			mutex_unlock(&dev->device_lock);
-			return IRQ_HANDLED;
-		}
-	}
-	/* check slots available for reading */
-	slots = mei_count_full_read_slots(dev);
-	while (slots > 0) {
-		/* we have urgent data to send so break the read */
-		if (dev->wr_ext_msg.hdr.length)
-			break;
-		dev_dbg(&dev->pdev->dev, "slots =%08x\n", slots);
-		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_handler.\n");
-		rets = mei_irq_thread_read_handler(&complete_list, dev, &slots);
-		if (rets)
-			goto end;
-	}
-	rets = mei_irq_thread_write_handler(dev, &complete_list);
-end:
-	dev_dbg(&dev->pdev->dev, "end of bottom half function.\n");
-	dev->host_hw_state = mei_hcsr_read(dev);
-	dev->mei_host_buffer_is_empty = mei_hbuf_is_empty(dev);
-
-	bus_message_received = false;
-	if (dev->recvd_msg && waitqueue_active(&dev->wait_recvd_msg)) {
-		dev_dbg(&dev->pdev->dev, "received waiting bus message\n");
-		bus_message_received = true;
-	}
-	mutex_unlock(&dev->device_lock);
-	if (bus_message_received) {
-		dev_dbg(&dev->pdev->dev, "wake up dev->wait_recvd_msg\n");
-		wake_up_interruptible(&dev->wait_recvd_msg);
-		bus_message_received = false;
-	}
-	if (list_empty(&complete_list.list))
-		return IRQ_HANDLED;
-
-
-	list_for_each_entry_safe(cb_pos, cb_next, &complete_list.list, list) {
-		cl = cb_pos->cl;
-		list_del(&cb_pos->list);
-		if (cl) {
-			if (cl != &dev->iamthif_cl) {
-				dev_dbg(&dev->pdev->dev, "completing call back.\n");
-				_mei_cmpl(cl, cb_pos);
-				cb_pos = NULL;
-			} else if (cl == &dev->iamthif_cl) {
-				mei_amthif_complete(dev, cb_pos);
-			}
-		}
-	}
-	return IRQ_HANDLED;
-}
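
The bottom half that used to live here as mei_interrupt_thread_handler() is now split into the hardware-neutral mei_irq_read_handler() and mei_irq_write_handler(), which a backend-specific thread handler is expected to drive under device_lock. A rough sketch of such a caller, mirroring the removed function; example_irq_thread_handler is illustrative, and mei_io_list_init() is assumed to still be available from the client code:

/* Sketch only: a hw-specific bottom half driving the generic irq helpers.
 * Names outside this patch (example_*) are illustrative. */
irqreturn_t example_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;

	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);	/* assumed to come from client.h now */

	/* drain the read window, one header at a time */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		if (mei_irq_read_handler(dev, &complete_list, &slots))
			break;
	}

	/* then let the write side consume whatever credits are left */
	mei_irq_write_handler(dev, &complete_list);

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	mutex_unlock(&dev->device_lock);

	/* callbacks gathered in complete_list would be dispatched here */
	return IRQ_HANDLED;
}
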
diff --git a/drivers/misc/mei/iorw.c b/drivers/misc/mei/iorw.c
deleted file mode 100644
index eb93a1b..0000000
--- a/drivers/misc/mei/iorw.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- *
- * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/aio.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/ioctl.h>
-#include <linux/cdev.h>
-#include <linux/list.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/uuid.h>
-#include <linux/jiffies.h>
-#include <linux/uaccess.h>
-
-
-#include "mei_dev.h"
-#include "hw.h"
-#include <linux/mei.h>
-#include "interface.h"
-
-/**
- * mei_io_cb_free - free mei_cb_private related memory
- *
- * @cb: mei callback struct
- */
-void mei_io_cb_free(struct mei_cl_cb *cb)
-{
-	if (cb == NULL)
-		return;
-
-	kfree(cb->request_buffer.data);
-	kfree(cb->response_buffer.data);
-	kfree(cb);
-}
-/**
- * mei_io_cb_init - allocate and initialize io callback
- *
- * @cl - mei client
- * @file: pointer to file structure
- *
- * returns mei_cl_cb pointer or NULL;
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
-{
-	struct mei_cl_cb *cb;
-
-	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
-	if (!cb)
-		return NULL;
-
-	mei_io_list_init(cb);
-
-	cb->file_object = fp;
-	cb->cl = cl;
-	cb->buf_idx = 0;
-	return cb;
-}
-
-
-/**
- * mei_io_cb_alloc_req_buf - allocate request buffer
- *
- * @cb -  io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- *         -EINVAL if cb is NULL
- *         -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
-{
-	if (!cb)
-		return -EINVAL;
-
-	if (length == 0)
-		return 0;
-
-	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
-	if (!cb->request_buffer.data)
-		return -ENOMEM;
-	cb->request_buffer.size = length;
-	return 0;
-}
-/**
- * mei_io_cb_alloc_req_buf - allocate respose buffer
- *
- * @cb -  io callback structure
- * @size: size of the buffer
- *
- * returns 0 on success
- *         -EINVAL if cb is NULL
- *         -ENOMEM if allocation failed
- */
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
-{
-	if (!cb)
-		return -EINVAL;
-
-	if (length == 0)
-		return 0;
-
-	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
-	if (!cb->response_buffer.data)
-		return -ENOMEM;
-	cb->response_buffer.size = length;
-	return 0;
-}
-
-
-/**
- * mei_me_cl_by_id return index to me_clients for client_id
- *
- * @dev: the device structure
- * @client_id: me client id
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns index on success, -ENOENT on failure.
- */
-
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
-{
-	int i;
-	for (i = 0; i < dev->me_clients_num; i++)
-		if (dev->me_clients[i].client_id == client_id)
-			break;
-	if (WARN_ON(dev->me_clients[i].client_id != client_id))
-		return -ENOENT;
-
-	if (i == dev->me_clients_num)
-		return -ENOENT;
-
-	return i;
-}
-
-/**
- * mei_ioctl_connect_client - the connect to fw client IOCTL function
- *
- * @dev: the device structure
- * @data: IOCTL connect data, input and output parameters
- * @file: private data of the file object
- *
- * Locking: called under "dev->device_lock" lock
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_ioctl_connect_client(struct file *file,
-			struct mei_connect_client_data *data)
-{
-	struct mei_device *dev;
-	struct mei_cl_cb *cb;
-	struct mei_client *client;
-	struct mei_cl *cl;
-	long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
-	int i;
-	int err;
-	int rets;
-
-	cl = file->private_data;
-	if (WARN_ON(!cl || !cl->dev))
-		return -ENODEV;
-
-	dev = cl->dev;
-
-	dev_dbg(&dev->pdev->dev, "mei_ioctl_connect_client() Entry\n");
-
-	/* buffered ioctl cb */
-	cb = mei_io_cb_init(cl, file);
-	if (!cb) {
-		rets = -ENOMEM;
-		goto end;
-	}
-
-	cb->fop_type = MEI_FOP_IOCTL;
-
-	if (dev->dev_state != MEI_DEV_ENABLED) {
-		rets = -ENODEV;
-		goto end;
-	}
-	if (cl->state != MEI_FILE_INITIALIZING &&
-	    cl->state != MEI_FILE_DISCONNECTED) {
-		rets = -EBUSY;
-		goto end;
-	}
-
-	/* find ME client we're trying to connect to */
-	i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
-	if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
-		cl->me_client_id = dev->me_clients[i].client_id;
-		cl->state = MEI_FILE_CONNECTING;
-	}
-
-	dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
-			cl->me_client_id);
-	dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
-			dev->me_clients[i].props.protocol_version);
-	dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
-			dev->me_clients[i].props.max_msg_length);
-
-	/* if we're connecting to amthi client then we will use the
-	 * existing connection
-	 */
-	if (uuid_le_cmp(data->in_client_uuid, mei_amthi_guid) == 0) {
-		dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
-		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
-			rets = -ENODEV;
-			goto end;
-		}
-		clear_bit(cl->host_client_id, dev->host_clients_map);
-		mei_me_cl_unlink(dev, cl);
-
-		kfree(cl);
-		cl = NULL;
-		file->private_data = &dev->iamthif_cl;
-
-		client = &data->out_client_properties;
-		client->max_msg_length =
-			dev->me_clients[i].props.max_msg_length;
-		client->protocol_version =
-			dev->me_clients[i].props.protocol_version;
-		rets = dev->iamthif_cl.status;
-
-		goto end;
-	}
-
-	if (cl->state != MEI_FILE_CONNECTING) {
-		rets = -ENODEV;
-		goto end;
-	}
-
-
-	/* prepare the output buffer */
-	client = &data->out_client_properties;
-	client->max_msg_length = dev->me_clients[i].props.max_msg_length;
-	client->protocol_version = dev->me_clients[i].props.protocol_version;
-	dev_dbg(&dev->pdev->dev, "Can connect?\n");
-	if (dev->mei_host_buffer_is_empty
-	    && !mei_other_client_is_connecting(dev, cl)) {
-		dev_dbg(&dev->pdev->dev, "Sending Connect Message\n");
-		dev->mei_host_buffer_is_empty = false;
-		if (mei_connect(dev, cl)) {
-			dev_dbg(&dev->pdev->dev, "Sending connect message - failed\n");
-			rets = -ENODEV;
-			goto end;
-		} else {
-			dev_dbg(&dev->pdev->dev, "Sending connect message - succeeded\n");
-			cl->timer_count = MEI_CONNECT_TIMEOUT;
-			list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
-		}
-
-
-	} else {
-		dev_dbg(&dev->pdev->dev, "Queuing the connect request due to device busy\n");
-		dev_dbg(&dev->pdev->dev, "add connect cb to control write list.\n");
-		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-	}
-	mutex_unlock(&dev->device_lock);
-	err = wait_event_timeout(dev->wait_recvd_msg,
-			(MEI_FILE_CONNECTED == cl->state ||
-			 MEI_FILE_DISCONNECTED == cl->state), timeout);
-
-	mutex_lock(&dev->device_lock);
-	if (MEI_FILE_CONNECTED == cl->state) {
-		dev_dbg(&dev->pdev->dev, "successfully connected to FW client.\n");
-		rets = cl->status;
-		goto end;
-	} else {
-		dev_dbg(&dev->pdev->dev, "failed to connect to FW client.cl->state = %d.\n",
-		    cl->state);
-		if (!err) {
-			dev_dbg(&dev->pdev->dev,
-				"wait_event_interruptible_timeout failed on client"
-				" connect message fw response message.\n");
-		}
-		rets = -EFAULT;
-
-		mei_io_list_flush(&dev->ctrl_rd_list, cl);
-		mei_io_list_flush(&dev->ctrl_wr_list, cl);
-		goto end;
-	}
-	rets = 0;
-end:
-	dev_dbg(&dev->pdev->dev, "free connect cb memory.");
-	mei_io_cb_free(cb);
-	return rets;
-}
-
-/**
- * mei_start_read - the start read client message function.
- *
- * @dev: the device structure
- * @if_num:  minor number
- * @cl: private data of the file object
- *
- * returns 0 on success, <0 on failure.
- */
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
-{
-	struct mei_cl_cb *cb;
-	int rets;
-	int i;
-
-	if (cl->state != MEI_FILE_CONNECTED)
-		return -ENODEV;
-
-	if (dev->dev_state != MEI_DEV_ENABLED)
-		return -ENODEV;
-
-	if (cl->read_pending || cl->read_cb) {
-		dev_dbg(&dev->pdev->dev, "read is pending.\n");
-		return -EBUSY;
-	}
-	i = mei_me_cl_by_id(dev, cl->me_client_id);
-	if (i < 0) {
-		dev_err(&dev->pdev->dev, "no such me client %d\n",
-			cl->me_client_id);
-		return  -ENODEV;
-	}
-
-	cb = mei_io_cb_init(cl, NULL);
-	if (!cb)
-		return -ENOMEM;
-
-	rets = mei_io_cb_alloc_resp_buf(cb,
-			dev->me_clients[i].props.max_msg_length);
-	if (rets)
-		goto err;
-
-	cb->fop_type = MEI_FOP_READ;
-	cl->read_cb = cb;
-	if (dev->mei_host_buffer_is_empty) {
-		dev->mei_host_buffer_is_empty = false;
-		if (mei_send_flow_control(dev, cl)) {
-			rets = -ENODEV;
-			goto err;
-		}
-		list_add_tail(&cb->list, &dev->read_list.list);
-	} else {
-		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
-	}
-	return rets;
-err:
-	mei_io_cb_free(cb);
-	return rets;
-}
-
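
The io-callback helpers deleted with iorw.c (mei_io_cb_init(), mei_io_cb_alloc_resp_buf(), mei_io_cb_free()) keep the same shape after the move into the client code; a queued read is still set up roughly the way the removed mei_start_read() did it. A sketch under the assumption that the helpers are exported from client.h, with example_queue_read() being an illustrative name:

/* Sketch: allocate a callback, size its response buffer to the FW
 * client's max message length, and queue it on the read list. */
static int example_queue_read(struct mei_device *dev, struct mei_cl *cl, int i)
{
	struct mei_cl_cb *cb;
	int rets;

	cb = mei_io_cb_init(cl, NULL);		/* no file backs this read */
	if (!cb)
		return -ENOMEM;

	rets = mei_io_cb_alloc_resp_buf(cb,
			dev->me_clients[i].props.max_msg_length);
	if (rets) {
		mei_io_cb_free(cb);
		return rets;
	}

	cb->fop_type = MEI_FOP_READ;
	cl->read_cb = cb;
	list_add_tail(&cb->list, &dev->read_list.list);
	return 0;
}
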
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 43fb52f..903f809 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -37,79 +37,11 @@
 #include <linux/interrupt.h>
 #include <linux/miscdevice.h>
 
-#include "mei_dev.h"
 #include <linux/mei.h>
-#include "interface.h"
 
-/* AMT device is a singleton on the platform */
-static struct pci_dev *mei_pdev;
-
-/* mei_pci_tbl - PCI Device ID Table */
-static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
-	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
-
-	/* required last entry */
-	{0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
-
-static DEFINE_MUTEX(mei_mutex);
-
-
-/**
- * find_read_list_entry - find read list entry
- *
- * @dev: device structure
- * @file: pointer to file structure
- *
- * returns cb on success, NULL on error
- */
-static struct mei_cl_cb *find_read_list_entry(
-		struct mei_device *dev,
-		struct mei_cl *cl)
-{
-	struct mei_cl_cb *pos = NULL;
-	struct mei_cl_cb *next = NULL;
-
-	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
-	list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
-		if (mei_cl_cmp_id(cl, pos->cl))
-			return pos;
-	return NULL;
-}
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
 
 /**
  * mei_open - the open function
@@ -121,16 +53,20 @@
  */
 static int mei_open(struct inode *inode, struct file *file)
 {
+	struct miscdevice *misc = file->private_data;
+	struct pci_dev *pdev;
 	struct mei_cl *cl;
 	struct mei_device *dev;
-	unsigned long cl_id;
+
 	int err;
 
 	err = -ENODEV;
-	if (!mei_pdev)
+	if (!misc->parent)
 		goto out;
 
-	dev = pci_get_drvdata(mei_pdev);
+	pdev = container_of(misc->parent, struct pci_dev, dev);
+
+	dev = pci_get_drvdata(pdev);
 	if (!dev)
 		goto out;
 
@@ -153,24 +89,9 @@
 		goto out_unlock;
 	}
 
-	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
-	if (cl_id >= MEI_CLIENTS_MAX) {
-		dev_err(&dev->pdev->dev, "client_id exceded %d",
-				MEI_CLIENTS_MAX) ;
+	err = mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY);
+	if (err)
 		goto out_unlock;
-	}
-
-	cl->host_client_id  = cl_id;
-
-	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
-
-	dev->open_handle_count++;
-
-	list_add_tail(&cl->link, &dev->file_list);
-
-	set_bit(cl->host_client_id, dev->host_clients_map);
-	cl->state = MEI_FILE_INITIALIZING;
-	cl->sm_state = 0;
 
 	file->private_data = cl;
 	mutex_unlock(&dev->device_lock);
@@ -216,7 +137,7 @@
 		    "ME client = %d\n",
 		    cl->host_client_id,
 		    cl->me_client_id);
-		rets = mei_disconnect_host_client(dev, cl);
+		rets = mei_cl_disconnect(cl);
 	}
 	mei_cl_flush_queues(cl);
 	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
@@ -227,12 +148,13 @@
 		clear_bit(cl->host_client_id, dev->host_clients_map);
 		dev->open_handle_count--;
 	}
-	mei_me_cl_unlink(dev, cl);
+	mei_cl_unlink(cl);
+
 
 	/* free read cb */
 	cb = NULL;
 	if (cl->read_cb) {
-		cb = find_read_list_entry(dev, cl);
+		cb = mei_cl_find_read_cb(cl);
 		/* Remove entry from read list */
 		if (cb)
 			list_del(&cb->list);
@@ -322,7 +244,7 @@
 		goto out;
 	}
 
-	err = mei_start_read(dev, cl);
+	err = mei_cl_read_start(cl);
 	if (err && err != -EBUSY) {
 		dev_dbg(&dev->pdev->dev,
 			"mei start read failure with status = %d\n", err);
@@ -393,14 +315,13 @@
 		goto out;
 
 free:
-	cb_pos = find_read_list_entry(dev, cl);
+	cb_pos = mei_cl_find_read_cb(cl);
 	/* Remove entry from read list */
 	if (cb_pos)
 		list_del(&cb_pos->list);
 	mei_io_cb_free(cb);
 	cl->reading_state = MEI_IDLE;
 	cl->read_cb = NULL;
-	cl->read_pending = 0;
 out:
 	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
 	mutex_unlock(&dev->device_lock);
@@ -475,16 +396,15 @@
 	/* free entry used in read */
 	if (cl->reading_state == MEI_READ_COMPLETE) {
 		*offset = 0;
-		write_cb = find_read_list_entry(dev, cl);
+		write_cb = mei_cl_find_read_cb(cl);
 		if (write_cb) {
 			list_del(&write_cb->list);
 			mei_io_cb_free(write_cb);
 			write_cb = NULL;
 			cl->reading_state = MEI_IDLE;
 			cl->read_cb = NULL;
-			cl->read_pending = 0;
 		}
-	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
+	} else if (cl->reading_state == MEI_IDLE)
 		*offset = 0;
 
 
@@ -519,7 +439,7 @@
 
 		if (rets) {
 			dev_err(&dev->pdev->dev,
-				"amthi write failed with status = %d\n", rets);
+				"amthif write failed with status = %d\n", rets);
 			goto err;
 		}
 		mutex_unlock(&dev->device_lock);
@@ -530,20 +450,20 @@
 
 	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
 	    cl->host_client_id, cl->me_client_id);
-	rets = mei_flow_ctrl_creds(dev, cl);
+	rets = mei_cl_flow_ctrl_creds(cl);
 	if (rets < 0)
 		goto err;
 
-	if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
+	if (rets == 0 || !dev->hbuf_is_ready) {
 		write_cb->buf_idx = 0;
 		mei_hdr.msg_complete = 0;
 		cl->writing_state = MEI_WRITING;
 		goto out;
 	}
 
-	dev->mei_host_buffer_is_empty = false;
-	if (length >  mei_hbuf_max_data(dev)) {
-		mei_hdr.length = mei_hbuf_max_data(dev);
+	dev->hbuf_is_ready = false;
+	if (length >  mei_hbuf_max_len(dev)) {
+		mei_hdr.length = mei_hbuf_max_len(dev);
 		mei_hdr.msg_complete = 0;
 	} else {
 		mei_hdr.length = length;
@@ -552,10 +472,10 @@
 	mei_hdr.host_addr = cl->host_client_id;
 	mei_hdr.me_addr = cl->me_client_id;
 	mei_hdr.reserved = 0;
-	dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
-	    *((u32 *) &mei_hdr));
-	if (mei_write_message(dev, &mei_hdr,
-		write_cb->request_buffer.data, mei_hdr.length)) {
+
+	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
+		MEI_HDR_PRM(&mei_hdr));
+	if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
 		rets = -ENODEV;
 		goto err;
 	}
@@ -564,7 +484,7 @@
 
 out:
 	if (mei_hdr.msg_complete) {
-		if (mei_flow_ctrl_reduce(dev, cl)) {
+		if (mei_cl_flow_ctrl_reduce(cl)) {
 			rets = -ENODEV;
 			goto err;
 		}
@@ -582,6 +502,103 @@
 	return rets;
 }
 
+/**
+ * mei_ioctl_connect_client - the connect to fw client IOCTL function
+ *
+ * @file: private data of the file object
+ * @data: IOCTL connect data, input and output parameters
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_ioctl_connect_client(struct file *file,
+			struct mei_connect_client_data *data)
+{
+	struct mei_device *dev;
+	struct mei_client *client;
+	struct mei_cl *cl;
+	int i;
+	int rets;
+
+	cl = file->private_data;
+	if (WARN_ON(!cl || !cl->dev))
+		return -ENODEV;
+
+	dev = cl->dev;
+
+	if (dev->dev_state != MEI_DEV_ENABLED) {
+		rets = -ENODEV;
+		goto end;
+	}
+
+	if (cl->state != MEI_FILE_INITIALIZING &&
+	    cl->state != MEI_FILE_DISCONNECTED) {
+		rets = -EBUSY;
+		goto end;
+	}
+
+	/* find ME client we're trying to connect to */
+	i = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
+	if (i >= 0 && !dev->me_clients[i].props.fixed_address) {
+		cl->me_client_id = dev->me_clients[i].client_id;
+		cl->state = MEI_FILE_CONNECTING;
+	}
+
+	dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n",
+			cl->me_client_id);
+	dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n",
+			dev->me_clients[i].props.protocol_version);
+	dev_dbg(&dev->pdev->dev, "FW Client - Max Msg Len = %d\n",
+			dev->me_clients[i].props.max_msg_length);
+
+	/* if we're connecting to amthif client then we will use the
+	 * existing connection
+	 */
+	if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
+		dev_dbg(&dev->pdev->dev, "FW Client is amthi\n");
+		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
+			rets = -ENODEV;
+			goto end;
+		}
+		clear_bit(cl->host_client_id, dev->host_clients_map);
+		mei_cl_unlink(cl);
+
+		kfree(cl);
+		cl = NULL;
+		file->private_data = &dev->iamthif_cl;
+
+		client = &data->out_client_properties;
+		client->max_msg_length =
+			dev->me_clients[i].props.max_msg_length;
+		client->protocol_version =
+			dev->me_clients[i].props.protocol_version;
+		rets = dev->iamthif_cl.status;
+
+		goto end;
+	}
+
+	if (cl->state != MEI_FILE_CONNECTING) {
+		rets = -ENODEV;
+		goto end;
+	}
+
+
+	/* prepare the output buffer */
+	client = &data->out_client_properties;
+	client->max_msg_length = dev->me_clients[i].props.max_msg_length;
+	client->protocol_version = dev->me_clients[i].props.protocol_version;
+	dev_dbg(&dev->pdev->dev, "Can connect?\n");
+
+
+	rets = mei_cl_connect(cl, file);
+
+end:
+	dev_dbg(&dev->pdev->dev, "free connect cb memory.");
+	return rets;
+}
+
 
 /**
  * mei_ioctl - the IOCTL function
@@ -630,6 +647,7 @@
 		rets = -EFAULT;
 		goto out;
 	}
+
 	rets = mei_ioctl_connect_client(file, connect_data);
 
 	/* if all is ok, copying the data back to user. */
@@ -726,7 +744,6 @@
 	.llseek = no_llseek
 };
 
-
 /*
  * Misc Device Struct
  */
@@ -736,300 +753,17 @@
 		.minor = MISC_DYNAMIC_MINOR,
 };
 
-/**
- * mei_quirk_probe - probe for devices that doesn't valid ME interface
- * @pdev: PCI device structure
- * @ent: entry into pci_device_table
- *
- * returns true if ME Interface is valid, false otherwise
- */
-static bool mei_quirk_probe(struct pci_dev *pdev,
-				const struct pci_device_id *ent)
+int mei_register(struct device *dev)
 {
-	u32 reg;
-	if (ent->device == MEI_DEV_ID_PBG_1) {
-		pci_read_config_dword(pdev, 0x48, &reg);
-		/* make sure that bit 9 is up and bit 10 is down */
-		if ((reg & 0x600) == 0x200) {
-			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
-			return false;
-		}
-	}
-	return true;
-}
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_probe(struct pci_dev *pdev,
-				const struct pci_device_id *ent)
-{
-	struct mei_device *dev;
-	int err;
-
-	mutex_lock(&mei_mutex);
-
-	if (!mei_quirk_probe(pdev, ent)) {
-		err = -ENODEV;
-		goto end;
-	}
-
-	if (mei_pdev) {
-		err = -EEXIST;
-		goto end;
-	}
-	/* enable pci dev */
-	err = pci_enable_device(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "failed to enable pci device.\n");
-		goto end;
-	}
-	/* set PCI host mastering  */
-	pci_set_master(pdev);
-	/* pci request regions for mei driver */
-	err = pci_request_regions(pdev, KBUILD_MODNAME);
-	if (err) {
-		dev_err(&pdev->dev, "failed to get pci regions.\n");
-		goto disable_device;
-	}
-	/* allocates and initializes the mei dev structure */
-	dev = mei_device_init(pdev);
-	if (!dev) {
-		err = -ENOMEM;
-		goto release_regions;
-	}
-	/* mapping  IO device memory */
-	dev->mem_addr = pci_iomap(pdev, 0, 0);
-	if (!dev->mem_addr) {
-		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
-		err = -ENOMEM;
-		goto free_device;
-	}
-	pci_enable_msi(pdev);
-
-	 /* request and enable interrupt */
-	if (pci_dev_msi_enabled(pdev))
-		err = request_threaded_irq(pdev->irq,
-			NULL,
-			mei_interrupt_thread_handler,
-			IRQF_ONESHOT, KBUILD_MODNAME, dev);
-	else
-		err = request_threaded_irq(pdev->irq,
-			mei_interrupt_quick_handler,
-			mei_interrupt_thread_handler,
-			IRQF_SHARED, KBUILD_MODNAME, dev);
-
-	if (err) {
-		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
-		       pdev->irq);
-		goto disable_msi;
-	}
-	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
-	INIT_WORK(&dev->init_work, mei_host_client_init);
-
-	if (mei_hw_init(dev)) {
-		dev_err(&pdev->dev, "init hw failure.\n");
-		err = -ENODEV;
-		goto release_irq;
-	}
-
-	err = misc_register(&mei_misc_device);
-	if (err)
-		goto release_irq;
-
-	mei_pdev = pdev;
-	pci_set_drvdata(pdev, dev);
-
-
-	schedule_delayed_work(&dev->timer_work, HZ);
-
-	mutex_unlock(&mei_mutex);
-
-	pr_debug("initialization successful.\n");
-
-	return 0;
-
-release_irq:
-	/* disable interrupts */
-	dev->host_hw_state = mei_hcsr_read(dev);
-	mei_disable_interrupts(dev);
-	flush_scheduled_work();
-	free_irq(pdev->irq, dev);
-disable_msi:
-	pci_disable_msi(pdev);
-	pci_iounmap(pdev, dev->mem_addr);
-free_device:
-	kfree(dev);
-release_regions:
-	pci_release_regions(pdev);
-disable_device:
-	pci_disable_device(pdev);
-end:
-	mutex_unlock(&mei_mutex);
-	dev_err(&pdev->dev, "initialization failed.\n");
-	return err;
+	mei_misc_device.parent = dev;
+	return misc_register(&mei_misc_device);
 }
 
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void mei_remove(struct pci_dev *pdev)
+void mei_deregister(void)
 {
-	struct mei_device *dev;
-
-	if (mei_pdev != pdev)
-		return;
-
-	dev = pci_get_drvdata(pdev);
-	if (!dev)
-		return;
-
-	mutex_lock(&dev->device_lock);
-
-	cancel_delayed_work(&dev->timer_work);
-
-	mei_wd_stop(dev);
-
-	mei_pdev = NULL;
-
-	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
-		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
-		mei_disconnect_host_client(dev, &dev->iamthif_cl);
-	}
-	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
-		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
-		mei_disconnect_host_client(dev, &dev->wd_cl);
-	}
-
-	/* Unregistering watchdog device */
-	mei_watchdog_unregister(dev);
-
-	/* remove entry if already in list */
-	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
-	mei_me_cl_unlink(dev, &dev->wd_cl);
-	mei_me_cl_unlink(dev, &dev->iamthif_cl);
-
-	dev->iamthif_current_cb = NULL;
-	dev->me_clients_num = 0;
-
-	mutex_unlock(&dev->device_lock);
-
-	flush_scheduled_work();
-
-	/* disable interrupts */
-	mei_disable_interrupts(dev);
-
-	free_irq(pdev->irq, dev);
-	pci_disable_msi(pdev);
-	pci_set_drvdata(pdev, NULL);
-
-	if (dev->mem_addr)
-		pci_iounmap(pdev, dev->mem_addr);
-
-	kfree(dev);
-
-	pci_release_regions(pdev);
-	pci_disable_device(pdev);
-
 	misc_deregister(&mei_misc_device);
-}
-#ifdef CONFIG_PM
-static int mei_pci_suspend(struct device *device)
-{
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct mei_device *dev = pci_get_drvdata(pdev);
-	int err;
-
-	if (!dev)
-		return -ENODEV;
-	mutex_lock(&dev->device_lock);
-
-	cancel_delayed_work(&dev->timer_work);
-
-	/* Stop watchdog if exists */
-	err = mei_wd_stop(dev);
-	/* Set new mei state */
-	if (dev->dev_state == MEI_DEV_ENABLED ||
-	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
-		dev->dev_state = MEI_DEV_POWER_DOWN;
-		mei_reset(dev, 0);
-	}
-	mutex_unlock(&dev->device_lock);
-
-	free_irq(pdev->irq, dev);
-	pci_disable_msi(pdev);
-
-	return err;
+	mei_misc_device.parent = NULL;
 }
 
-static int mei_pci_resume(struct device *device)
-{
-	struct pci_dev *pdev = to_pci_dev(device);
-	struct mei_device *dev;
-	int err;
-
-	dev = pci_get_drvdata(pdev);
-	if (!dev)
-		return -ENODEV;
-
-	pci_enable_msi(pdev);
-
-	/* request and enable interrupt */
-	if (pci_dev_msi_enabled(pdev))
-		err = request_threaded_irq(pdev->irq,
-			NULL,
-			mei_interrupt_thread_handler,
-			IRQF_ONESHOT, KBUILD_MODNAME, dev);
-	else
-		err = request_threaded_irq(pdev->irq,
-			mei_interrupt_quick_handler,
-			mei_interrupt_thread_handler,
-			IRQF_SHARED, KBUILD_MODNAME, dev);
-
-	if (err) {
-		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
-				pdev->irq);
-		return err;
-	}
-
-	mutex_lock(&dev->device_lock);
-	dev->dev_state = MEI_DEV_POWER_UP;
-	mei_reset(dev, 1);
-	mutex_unlock(&dev->device_lock);
-
-	/* Start timer if stopped in suspend */
-	schedule_delayed_work(&dev->timer_work, HZ);
-
-	return err;
-}
-static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
-#define MEI_PM_OPS	(&mei_pm_ops)
-#else
-#define MEI_PM_OPS	NULL
-#endif /* CONFIG_PM */
-/*
- *  PCI driver structure
- */
-static struct pci_driver mei_driver = {
-	.name = KBUILD_MODNAME,
-	.id_table = mei_pci_tbl,
-	.probe = mei_probe,
-	.remove = mei_remove,
-	.shutdown = mei_remove,
-	.driver.pm = MEI_PM_OPS,
-};
-
-module_pci_driver(mei_driver);
-
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
 MODULE_LICENSE("GPL v2");
+
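
The mei_write() path above now leans on the hw ops for sizing and logging: per-client credits are checked first, the first fragment is clamped to mei_hbuf_max_len(), the header is traced with MEI_HDR_FMT, and flow control is reduced only once a complete message has gone out. Condensed into one sketch (example_send_first_fragment is an illustrative name, queueing and error cases folded together):

/* Sketch of the fragmenting write; helpers are the ones introduced
 * by this series, error handling trimmed. */
static int example_send_first_fragment(struct mei_device *dev, struct mei_cl *cl,
				       struct mei_cl_cb *write_cb, size_t length)
{
	struct mei_msg_hdr mei_hdr;

	if (mei_cl_flow_ctrl_creds(cl) <= 0 || !dev->hbuf_is_ready) {
		cl->writing_state = MEI_WRITING;	/* irq path sends it later */
		return 0;
	}

	dev->hbuf_is_ready = false;
	mei_hdr.length = min_t(size_t, length, mei_hbuf_max_len(dev));
	mei_hdr.msg_complete = (mei_hdr.length == length);
	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n", MEI_HDR_PRM(&mei_hdr));

	if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data))
		return -ENODEV;

	if (mei_hdr.msg_complete && mei_cl_flow_ctrl_reduce(cl))
		return -ENODEV;

	cl->writing_state = MEI_WRITING;
	return 0;
}
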
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 25da045..cb80166 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -21,7 +21,9 @@
 #include <linux/watchdog.h>
 #include <linux/poll.h>
 #include <linux/mei.h>
+
 #include "hw.h"
+#include "hw-me-regs.h"
 
 /*
  * watch dog definition
@@ -44,7 +46,7 @@
 /*
  * AMTHI Client UUID
  */
-extern const uuid_le mei_amthi_guid;
+extern const uuid_le mei_amthif_guid;
 
 /*
  * Watchdog Client UUID
@@ -65,12 +67,18 @@
  * Number of File descriptors/handles
  * that can be opened to the driver.
  *
- * Limit to 253: 256 Total Clients
+ * Limit to 255: 256 Total Clients
  * minus internal client for MEI Bus Messags
- * minus internal client for AMTHI
- * minus internal client for Watchdog
  */
-#define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 3)
+#define  MEI_MAX_OPEN_HANDLE_COUNT (MEI_CLIENTS_MAX - 1)
+
+/*
+ * Internal Clients Number
+ */
+#define MEI_HOST_CLIENT_ID_ANY        (-1)
+#define MEI_HBM_HOST_CLIENT_ID         0 /* not used, just for documentation */
+#define MEI_WD_HOST_CLIENT_ID          1
+#define MEI_IAMTHIF_HOST_CLIENT_ID     2
 
 
 /* File state */
@@ -150,6 +158,19 @@
 	unsigned char *data;
 };
 
+/**
+ * struct mei_me_client - representation of me (fw) client
+ *
+ * @props  - client properties
+ * @client_id - me client id
+ * @mei_flow_ctrl_creds - flow control credits
+ */
+struct mei_me_client {
+	struct mei_client_properties props;
+	u8 client_id;
+	u8 mei_flow_ctrl_creds;
+};
+
 
 struct mei_cl;
 
@@ -178,7 +199,6 @@
 	wait_queue_head_t tx_wait;
 	wait_queue_head_t rx_wait;
 	wait_queue_head_t wait;
-	int read_pending;
 	int status;
 	/* ID of client connected */
 	u8 host_client_id;
@@ -191,10 +211,67 @@
 	struct mei_cl_cb *read_cb;
 };
 
+/** struct mei_hw_ops
+ *
+ * @host_set_ready   - notify FW that host side is ready
+ * @host_is_ready    - query for host readiness
+
+ * @hw_is_ready      - query if hw is ready
+ * @hw_reset         - reset hw
+ * @hw_config        - configure hw
+
+ * @intr_clear       - clear pending interrupts
+ * @intr_enable      - enable interrupts
+ * @intr_disable     - disable interrupts
+
+ * @hbuf_free_slots  - query for write buffer empty slots
+ * @hbuf_is_ready    - query if write buffer is empty
+ * @hbuf_max_len     - query for write buffer max len
+
+ * @write            - write a message to FW
+
+ * @rdbuf_full_slots - query how many slots are filled
+
+ * @read_hdr         - get first 4 bytes (header)
+ * @read             - read a buffer from the FW
+ */
+struct mei_hw_ops {
+
+	void (*host_set_ready) (struct mei_device *dev);
+	bool (*host_is_ready) (struct mei_device *dev);
+
+	bool (*hw_is_ready) (struct mei_device *dev);
+	void (*hw_reset) (struct mei_device *dev, bool enable);
+	void (*hw_config) (struct mei_device *dev);
+
+	void (*intr_clear) (struct mei_device *dev);
+	void (*intr_enable) (struct mei_device *dev);
+	void (*intr_disable) (struct mei_device *dev);
+
+	int (*hbuf_free_slots) (struct mei_device *dev);
+	bool (*hbuf_is_ready) (struct mei_device *dev);
+	size_t (*hbuf_max_len) (const struct mei_device *dev);
+
+	int (*write)(struct mei_device *dev,
+		     struct mei_msg_hdr *hdr,
+		     unsigned char *buf);
+
+	int (*rdbuf_full_slots)(struct mei_device *dev);
+
+	u32 (*read_hdr)(const struct mei_device *dev);
+	int (*read) (struct mei_device *dev,
+		     unsigned char *buf, unsigned long len);
+};
+
 /**
  * struct mei_device -  MEI private device struct
- * @hbuf_depth - depth of host(write) buffer
- * @wr_ext_msg - buffer for hbm control responses (set in read cycle)
+
+ * @mem_addr - mem mapped base register address
+
+ * @hbuf_depth - depth of hardware host/write buffer in slots
+ * @hbuf_is_ready - query if the host/write buffer is ready
+ * @wr_msg - the buffer for hbm control messages
+ * @wr_ext_msg - the buffer for hbm control responses (set in read cycle)
  */
 struct mei_device {
 	struct pci_dev *pdev;	/* pointer to pci device struct */
@@ -213,24 +290,14 @@
 	 */
 	struct list_head file_list;
 	long open_handle_count;
-	/*
-	 * memory of device
-	 */
-	unsigned int mem_base;
-	unsigned int mem_length;
-	void __iomem *mem_addr;
+
 	/*
 	 * lock for the device
 	 */
 	struct mutex device_lock; /* device lock */
 	struct delayed_work timer_work;	/* MEI timer delayed work (timeouts) */
 	bool recvd_msg;
-	/*
-	 * hw states of host and fw(ME)
-	 */
-	u32 host_hw_state;
-	u32 me_hw_state;
-	u8  hbuf_depth;
+
 	/*
 	 * waiting queue for receive message from FW
 	 */
@@ -243,11 +310,20 @@
 	enum mei_dev_state dev_state;
 	enum mei_init_clients_states init_clients_state;
 	u16 init_clients_timer;
-	bool need_reset;
 
 	unsigned char rd_msg_buf[MEI_RD_MSG_BUF_SIZE];	/* control messages */
 	u32 rd_msg_hdr;
-	u32 wr_msg_buf[128];	/* used for control messages */
+
+	/* write buffer */
+	u8 hbuf_depth;
+	bool hbuf_is_ready;
+
+	/* used for control messages */
+	struct {
+		struct mei_msg_hdr hdr;
+		unsigned char data[128];
+	} wr_msg;
+
 	struct {
 		struct mei_msg_hdr hdr;
 		unsigned char data[4];	/* All HBM messages are 4 bytes */
@@ -261,7 +337,6 @@
 	u8 me_clients_num;
 	u8 me_client_presentation_num;
 	u8 me_client_index;
-	bool mei_host_buffer_is_empty;
 
 	struct mei_cl wd_cl;
 	enum mei_wd_states wd_state;
@@ -289,6 +364,9 @@
 	bool iamthif_canceled;
 
 	struct work_struct init_work;
+
+	const struct mei_hw_ops *ops;
+	char hw[0] __aligned(sizeof(void *));
 };
 
 static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
@@ -300,96 +378,28 @@
 /*
  * mei init function prototypes
  */
-struct mei_device *mei_device_init(struct pci_dev *pdev);
+void mei_device_init(struct mei_device *dev);
 void mei_reset(struct mei_device *dev, int interrupts);
 int mei_hw_init(struct mei_device *dev);
-int mei_task_initialize_clients(void *data);
-int mei_initialize_clients(struct mei_device *dev);
-int mei_disconnect_host_client(struct mei_device *dev, struct mei_cl *cl);
-void mei_allocate_me_clients_storage(struct mei_device *dev);
-
-
-int mei_me_cl_link(struct mei_device *dev, struct mei_cl *cl,
-			const uuid_le *cguid, u8 host_client_id);
-void mei_me_cl_unlink(struct mei_device *dev, struct mei_cl *cl);
-int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *cuuid);
-int mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
-
-/*
- * MEI IO Functions
- */
-struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp);
-void mei_io_cb_free(struct mei_cl_cb *priv_cb);
-int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length);
-int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length);
-
-
-/**
- * mei_io_list_init - Sets up a queue list.
- *
- * @list: An instance cl callback structure
- */
-static inline void mei_io_list_init(struct mei_cl_cb *list)
-{
-	INIT_LIST_HEAD(&list->list);
-}
-void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
-
-/*
- * MEI ME Client Functions
- */
-
-struct mei_cl *mei_cl_allocate(struct mei_device *dev);
-void mei_cl_init(struct mei_cl *cl, struct mei_device *dev);
-int mei_cl_flush_queues(struct mei_cl *cl);
-/**
- * mei_cl_cmp_id - tells if file private data have same id
- *
- * @fe1: private data of 1. file object
- * @fe2: private data of 2. file object
- *
- * returns true  - if ids are the same and not NULL
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
-				const struct mei_cl *cl2)
-{
-	return cl1 && cl2 &&
-		(cl1->host_client_id == cl2->host_client_id) &&
-		(cl1->me_client_id == cl2->me_client_id);
-}
-
-
-
-/*
- * MEI Host Client Functions
- */
-void mei_host_start_message(struct mei_device *dev);
-void mei_host_enum_clients_message(struct mei_device *dev);
-int mei_host_client_enumerate(struct mei_device *dev);
-void mei_host_client_init(struct work_struct *work);
 
 /*
  *  MEI interrupt functions prototype
  */
-irqreturn_t mei_interrupt_quick_handler(int irq, void *dev_id);
-irqreturn_t mei_interrupt_thread_handler(int irq, void *dev_id);
+
 void mei_timer(struct work_struct *work);
+int mei_irq_read_handler(struct mei_device *dev,
+		struct mei_cl_cb *cmpl_list, s32 *slots);
 
-/*
- *  MEI input output function prototype
- */
-int mei_ioctl_connect_client(struct file *file,
-			struct mei_connect_client_data *data);
+int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
 
-int mei_start_read(struct mei_device *dev, struct mei_cl *cl);
-
+void mei_irq_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb_pos);
 
 /*
  * AMTHIF - AMT Host Interface Functions
  */
 void mei_amthif_reset_params(struct mei_device *dev);
 
-void mei_amthif_host_init(struct mei_device *dev);
+int mei_amthif_host_init(struct mei_device *dev);
 
 int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *priv_cb);
 
@@ -407,9 +417,6 @@
 void mei_amthif_run_next_cmd(struct mei_device *dev);
 
 
-int mei_amthif_read_message(struct mei_cl_cb *complete_list,
-		struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
-
 int mei_amthif_irq_write_complete(struct mei_device *dev, s32 *slots,
 			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list);
 
@@ -418,92 +425,107 @@
 		struct mei_device *dev, struct mei_msg_hdr *mei_hdr);
 int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);
 
+
+int mei_wd_send(struct mei_device *dev);
+int mei_wd_stop(struct mei_device *dev);
+int mei_wd_host_init(struct mei_device *dev);
+/*
+ * mei_watchdog_register - Registering watchdog interface
+ *   once we get a connection to the WD client
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister  - Unregistering watchdog interface
+ * @dev - mei device
+ */
+void mei_watchdog_unregister(struct mei_device *dev);
+
 /*
  * Register Access Function
  */
 
-/**
- * mei_reg_read - Reads 32bit data from the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to read the data
- *
- * returns register value (u32)
- */
-static inline u32 mei_reg_read(const struct mei_device *dev,
-			       unsigned long offset)
+static inline void mei_hw_config(struct mei_device *dev)
 {
-	return ioread32(dev->mem_addr + offset);
+	dev->ops->hw_config(dev);
+}
+static inline void mei_hw_reset(struct mei_device *dev, bool enable)
+{
+	dev->ops->hw_reset(dev, enable);
 }
 
-/**
- * mei_reg_write - Writes 32bit data to the mei device
- *
- * @dev: the device structure
- * @offset: offset from which to write the data
- * @value: register value to write (u32)
- */
-static inline void mei_reg_write(const struct mei_device *dev,
-				 unsigned long offset, u32 value)
+static inline void mei_clear_interrupts(struct mei_device *dev)
 {
-	iowrite32(value, dev->mem_addr + offset);
+	dev->ops->intr_clear(dev);
 }
 
-/**
- * mei_hcsr_read - Reads 32bit data from the host CSR
- *
- * @dev: the device structure
- *
- * returns the byte read.
- */
-static inline u32 mei_hcsr_read(const struct mei_device *dev)
+static inline void mei_enable_interrupts(struct mei_device *dev)
 {
-	return mei_reg_read(dev, H_CSR);
+	dev->ops->intr_enable(dev);
 }
 
-/**
- * mei_mecsr_read - Reads 32bit data from the ME CSR
- *
- * @dev: the device structure
- *
- * returns ME_CSR_HA register value (u32)
- */
-static inline u32 mei_mecsr_read(const struct mei_device *dev)
+static inline void mei_disable_interrupts(struct mei_device *dev)
 {
-	return mei_reg_read(dev, ME_CSR_HA);
+	dev->ops->intr_disable(dev);
 }
 
-/**
- * get_me_cb_rw - Reads 32bit data from the mei ME_CB_RW register
- *
- * @dev: the device structure
- *
- * returns ME_CB_RW register value (u32)
- */
-static inline u32 mei_mecbrw_read(const struct mei_device *dev)
+static inline void mei_host_set_ready(struct mei_device *dev)
 {
-	return mei_reg_read(dev, ME_CB_RW);
+	dev->ops->host_set_ready(dev);
+}
+static inline bool mei_host_is_ready(struct mei_device *dev)
+{
+	return dev->ops->host_is_ready(dev);
+}
+static inline bool mei_hw_is_ready(struct mei_device *dev)
+{
+	return dev->ops->hw_is_ready(dev);
 }
 
-
-/*
- * mei interface function prototypes
- */
-void mei_hcsr_set(struct mei_device *dev);
-void mei_csr_clear_his(struct mei_device *dev);
-
-void mei_enable_interrupts(struct mei_device *dev);
-void mei_disable_interrupts(struct mei_device *dev);
-
-static inline struct mei_msg_hdr *mei_hbm_hdr(u32 *buf, size_t length)
+static inline bool mei_hbuf_is_ready(struct mei_device *dev)
 {
-	struct mei_msg_hdr *hdr = (struct mei_msg_hdr *)buf;
-	hdr->host_addr = 0;
-	hdr->me_addr = 0;
-	hdr->length = length;
-	hdr->msg_complete = 1;
-	hdr->reserved = 0;
-	return hdr;
+	return dev->ops->hbuf_is_ready(dev);
 }
 
+static inline int mei_hbuf_empty_slots(struct mei_device *dev)
+{
+	return dev->ops->hbuf_free_slots(dev);
+}
+
+static inline size_t mei_hbuf_max_len(const struct mei_device *dev)
+{
+	return dev->ops->hbuf_max_len(dev);
+}
+
+static inline int mei_write_message(struct mei_device *dev,
+			struct mei_msg_hdr *hdr,
+			unsigned char *buf)
+{
+	return dev->ops->write(dev, hdr, buf);
+}
+
+static inline u32 mei_read_hdr(const struct mei_device *dev)
+{
+	return dev->ops->read_hdr(dev);
+}
+
+static inline void mei_read_slots(struct mei_device *dev,
+		     unsigned char *buf, unsigned long len)
+{
+	dev->ops->read(dev, buf, len);
+}
+
+static inline int mei_count_full_read_slots(struct mei_device *dev)
+{
+	return dev->ops->rdbuf_full_slots(dev);
+}
+
+int mei_register(struct device *dev);
+void mei_deregister(void);
+
+#define MEI_HDR_FMT "hdr:host=%02d me=%02d len=%d comp=%1d"
+#define MEI_HDR_PRM(hdr)                  \
+	(hdr)->host_addr, (hdr)->me_addr, \
+	(hdr)->length, (hdr)->msg_complete
+
 #endif
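
Every register access in the core now goes through struct mei_hw_ops, so adding a second hardware backend means filling in the table and nothing else; the inline wrappers above (mei_write_message(), mei_read_hdr(), mei_hbuf_is_ready() and friends) stay the only entry points the core calls. A bare-bones table, with the example_* callbacks standing in for real register code, would look like:

/* Sketch: what a hw backend hands to the core.  Only the ops struct
 * itself comes from this patch; the example_* functions are placeholders. */
static const struct mei_hw_ops example_hw_ops = {
	.host_set_ready	  = example_host_set_ready,
	.host_is_ready	  = example_host_is_ready,
	.hw_is_ready	  = example_hw_is_ready,
	.hw_reset	  = example_hw_reset,
	.hw_config	  = example_hw_config,
	.intr_clear	  = example_intr_clear,
	.intr_enable	  = example_intr_enable,
	.intr_disable	  = example_intr_disable,
	.hbuf_free_slots  = example_hbuf_free_slots,
	.hbuf_is_ready	  = example_hbuf_is_ready,
	.hbuf_max_len	  = example_hbuf_max_len,
	.write		  = example_write,
	.rdbuf_full_slots = example_rdbuf_full_slots,
	.read_hdr	  = example_read_hdr,
	.read		  = example_read,
};

The core is meant to reach the hardware only through the wrappers that dereference dev->ops, which is what lets hw-me.c keep the CSR details to itself.
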
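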
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
new file mode 100644
index 0000000..b40ec06
--- /dev/null
+++ b/drivers/misc/mei/pci-me.c
@@ -0,0 +1,396 @@
+/*
+ *
+ * Intel Management Engine Interface (Intel MEI) Linux driver
+ * Copyright (c) 2003-2012, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/aio.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/cdev.h>
+#include <linux/sched.h>
+#include <linux/uuid.h>
+#include <linux/compat.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+
+#include <linux/mei.h>
+
+#include "mei_dev.h"
+#include "hw-me.h"
+#include "client.h"
+
+/* AMT device is a singleton on the platform */
+static struct pci_dev *mei_pdev;
+
+/* mei_pci_tbl - PCI Device ID Table */
+static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},
+
+	/* required last entry */
+	{0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
+
+static DEFINE_MUTEX(mei_mutex);
+
+/**
+ * mei_quirk_probe - probe for devices that don't have a valid ME interface
+ * @pdev: PCI device structure
+ * @ent: entry into pci_device_table
+ *
+ * returns true if ME Interface is valid, false otherwise
+ */
+static bool mei_quirk_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	u32 reg;
+	if (ent->device == MEI_DEV_ID_PBG_1) {
+		pci_read_config_dword(pdev, 0x48, &reg);
+		/* make sure that bit 9 is up and bit 10 is down */
+		if ((reg & 0x600) == 0x200) {
+			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
+			return false;
+		}
+	}
+	return true;
+}
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in mei_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int mei_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct mei_device *dev;
+	struct mei_me_hw *hw;
+	int err;
+
+	mutex_lock(&mei_mutex);
+
+	if (!mei_quirk_probe(pdev, ent)) {
+		err = -ENODEV;
+		goto end;
+	}
+
+	if (mei_pdev) {
+		err = -EEXIST;
+		goto end;
+	}
+	/* enable pci dev */
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable pci device.\n");
+		goto end;
+	}
+	/* set PCI host mastering  */
+	pci_set_master(pdev);
+	/* pci request regions for mei driver */
+	err = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (err) {
+		dev_err(&pdev->dev, "failed to get pci regions.\n");
+		goto disable_device;
+	}
+	/* allocates and initializes the mei dev structure */
+	dev = mei_me_dev_init(pdev);
+	if (!dev) {
+		err = -ENOMEM;
+		goto release_regions;
+	}
+	hw = to_me_hw(dev);
+	/* mapping  IO device memory */
+	hw->mem_addr = pci_iomap(pdev, 0, 0);
+	if (!hw->mem_addr) {
+		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
+		err = -ENOMEM;
+		goto free_device;
+	}
+	pci_enable_msi(pdev);
+
+	 /* request and enable interrupt */
+	if (pci_dev_msi_enabled(pdev))
+		err = request_threaded_irq(pdev->irq,
+			NULL,
+			mei_me_irq_thread_handler,
+			IRQF_ONESHOT, KBUILD_MODNAME, dev);
+	else
+		err = request_threaded_irq(pdev->irq,
+			mei_me_irq_quick_handler,
+			mei_me_irq_thread_handler,
+			IRQF_SHARED, KBUILD_MODNAME, dev);
+
+	if (err) {
+		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
+		       pdev->irq);
+		goto disable_msi;
+	}
+
+	if (mei_hw_init(dev)) {
+		dev_err(&pdev->dev, "init hw failure.\n");
+		err = -ENODEV;
+		goto release_irq;
+	}
+
+	err = mei_register(&pdev->dev);
+	if (err)
+		goto release_irq;
+
+	mei_pdev = pdev;
+	pci_set_drvdata(pdev, dev);
+
+
+	schedule_delayed_work(&dev->timer_work, HZ);
+
+	mutex_unlock(&mei_mutex);
+
+	pr_debug("initialization successful.\n");
+
+	return 0;
+
+release_irq:
+	mei_disable_interrupts(dev);
+	flush_scheduled_work();
+	free_irq(pdev->irq, dev);
+disable_msi:
+	pci_disable_msi(pdev);
+	pci_iounmap(pdev, hw->mem_addr);
+free_device:
+	kfree(dev);
+release_regions:
+	pci_release_regions(pdev);
+disable_device:
+	pci_disable_device(pdev);
+end:
+	mutex_unlock(&mei_mutex);
+	dev_err(&pdev->dev, "initialization failed.\n");
+	return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void mei_remove(struct pci_dev *pdev)
+{
+	struct mei_device *dev;
+	struct mei_me_hw *hw;
+
+	if (mei_pdev != pdev)
+		return;
+
+	dev = pci_get_drvdata(pdev);
+	if (!dev)
+		return;
+
+	hw = to_me_hw(dev);
+
+	mutex_lock(&dev->device_lock);
+
+	cancel_delayed_work(&dev->timer_work);
+
+	mei_wd_stop(dev);
+
+	mei_pdev = NULL;
+
+	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+		mei_cl_disconnect(&dev->iamthif_cl);
+	}
+	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+		mei_cl_disconnect(&dev->wd_cl);
+	}
+
+	/* Unregistering watchdog device */
+	mei_watchdog_unregister(dev);
+
+	/* remove entry if already in list */
+	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+
+	if (dev->open_handle_count > 0)
+		dev->open_handle_count--;
+	mei_cl_unlink(&dev->wd_cl);
+
+	if (dev->open_handle_count > 0)
+		dev->open_handle_count--;
+	mei_cl_unlink(&dev->iamthif_cl);
+
+	dev->iamthif_current_cb = NULL;
+	dev->me_clients_num = 0;
+
+	mutex_unlock(&dev->device_lock);
+
+	flush_scheduled_work();
+
+	/* disable interrupts */
+	mei_disable_interrupts(dev);
+
+	free_irq(pdev->irq, dev);
+	pci_disable_msi(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	if (hw->mem_addr)
+		pci_iounmap(pdev, hw->mem_addr);
+
+	kfree(dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	mei_deregister();
+
+}
+#ifdef CONFIG_PM
+static int mei_pci_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct mei_device *dev = pci_get_drvdata(pdev);
+	int err;
+
+	if (!dev)
+		return -ENODEV;
+	mutex_lock(&dev->device_lock);
+
+	cancel_delayed_work(&dev->timer_work);
+
+	/* Stop watchdog if exists */
+	err = mei_wd_stop(dev);
+	/* Set new mei state */
+	if (dev->dev_state == MEI_DEV_ENABLED ||
+	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
+		dev->dev_state = MEI_DEV_POWER_DOWN;
+		mei_reset(dev, 0);
+	}
+	mutex_unlock(&dev->device_lock);
+
+	free_irq(pdev->irq, dev);
+	pci_disable_msi(pdev);
+
+	return err;
+}
+
+static int mei_pci_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct mei_device *dev;
+	int err;
+
+	dev = pci_get_drvdata(pdev);
+	if (!dev)
+		return -ENODEV;
+
+	pci_enable_msi(pdev);
+
+	/* request and enable interrupt */
+	if (pci_dev_msi_enabled(pdev))
+		err = request_threaded_irq(pdev->irq,
+			NULL,
+			mei_me_irq_thread_handler,
+			IRQF_ONESHOT, KBUILD_MODNAME, dev);
+	else
+		err = request_threaded_irq(pdev->irq,
+			mei_me_irq_quick_handler,
+			mei_me_irq_thread_handler,
+			IRQF_SHARED, KBUILD_MODNAME, dev);
+
+	if (err) {
+		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
+				pdev->irq);
+		return err;
+	}
+
+	mutex_lock(&dev->device_lock);
+	dev->dev_state = MEI_DEV_POWER_UP;
+	mei_reset(dev, 1);
+	mutex_unlock(&dev->device_lock);
+
+	/* Start timer if stopped in suspend */
+	schedule_delayed_work(&dev->timer_work, HZ);
+
+	return err;
+}
+static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
+#define MEI_PM_OPS	(&mei_pm_ops)
+#else
+#define MEI_PM_OPS	NULL
+#endif /* CONFIG_PM */
+/*
+ *  PCI driver structure
+ */
+static struct pci_driver mei_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = mei_pci_tbl,
+	.probe = mei_probe,
+	.remove = mei_remove,
+	.shutdown = mei_remove,
+	.driver.pm = MEI_PM_OPS,
+};
+
+module_pci_driver(mei_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
+MODULE_LICENSE("GPL v2");
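
The probe above never touches registers directly either; it asks mei_me_dev_init() for a device whose ME-specific state sits in the trailing hw[] area and reaches it through to_me_hw(). A sketch of the layout this implies, with struct mei_me_hw reduced to the one field the probe uses (the real definition lives in hw-me.h) and example_me_dev_init() plus the mei_me_hw_ops name being assumptions:

/* Sketch of the hw-private layout implied by dev->hw[] and to_me_hw(). */
struct mei_me_hw {
	void __iomem *mem_addr;		/* mapped BAR 0, as used by mei_probe() */
	/* ... further ME-only state ... */
};

#define to_me_hw(dev)	((struct mei_me_hw *)((dev)->hw))

struct mei_device *example_me_dev_init(struct pci_dev *pdev)
{
	struct mei_device *dev;

	dev = kzalloc(sizeof(struct mei_device) + sizeof(struct mei_me_hw),
		      GFP_KERNEL);
	if (!dev)
		return NULL;

	mei_device_init(dev);		/* generic init now takes the struct itself */
	dev->ops = &mei_me_hw_ops;	/* assumed name of the table from hw-me.c */
	dev->pdev = pdev;
	return dev;
}
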
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 9299a8c..2413247 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -21,11 +21,13 @@
 #include <linux/sched.h>
 #include <linux/watchdog.h>
 
-#include "mei_dev.h"
-#include "hw.h"
-#include "interface.h"
 #include <linux/mei.h>
 
+#include "mei_dev.h"
+#include "hbm.h"
+#include "hw-me.h"
+#include "client.h"
+
 static const u8 mei_start_wd_params[] = { 0x02, 0x12, 0x13, 0x10 };
 static const u8 mei_stop_wd_params[] = { 0x02, 0x02, 0x14, 0x10 };
 
@@ -62,30 +64,41 @@
  */
 int mei_wd_host_init(struct mei_device *dev)
 {
-	int id;
-	mei_cl_init(&dev->wd_cl, dev);
+	struct mei_cl *cl = &dev->wd_cl;
+	int i;
+	int ret;
 
-	/* look for WD client and connect to it */
-	dev->wd_cl.state = MEI_FILE_DISCONNECTED;
+	mei_cl_init(cl, dev);
+
 	dev->wd_timeout = MEI_WD_DEFAULT_TIMEOUT;
 	dev->wd_state = MEI_WD_IDLE;
 
-	/* Connect WD ME client to the host client */
-	id = mei_me_cl_link(dev, &dev->wd_cl,
-				&mei_wd_guid, MEI_WD_HOST_CLIENT_ID);
 
-	if (id < 0) {
+	/* check for valid client id */
+	i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
+	if (i < 0) {
 		dev_info(&dev->pdev->dev, "wd: failed to find the client\n");
 		return -ENOENT;
 	}
 
-	if (mei_connect(dev, &dev->wd_cl)) {
+	cl->me_client_id = dev->me_clients[i].client_id;
+
+	ret = mei_cl_link(cl, MEI_WD_HOST_CLIENT_ID);
+
+	if (ret < 0) {
+		dev_info(&dev->pdev->dev, "wd: failed link client\n");
+		return -ENOENT;
+	}
+
+	cl->state = MEI_FILE_CONNECTING;
+
+	if (mei_hbm_cl_connect_req(dev, cl)) {
 		dev_err(&dev->pdev->dev, "wd: failed to connect to the client\n");
-		dev->wd_cl.state = MEI_FILE_DISCONNECTED;
-		dev->wd_cl.host_client_id = 0;
+		cl->state = MEI_FILE_DISCONNECTED;
+		cl->host_client_id = 0;
 		return -EIO;
 	}
-	dev->wd_cl.timer_count = MEI_CONNECT_TIMEOUT;
+	cl->timer_count = MEI_CONNECT_TIMEOUT;
 
 	return 0;
 }
@@ -101,22 +114,21 @@
  */
 int mei_wd_send(struct mei_device *dev)
 {
-	struct mei_msg_hdr *mei_hdr;
+	struct mei_msg_hdr hdr;
 
-	mei_hdr = (struct mei_msg_hdr *) &dev->wr_msg_buf[0];
-	mei_hdr->host_addr = dev->wd_cl.host_client_id;
-	mei_hdr->me_addr = dev->wd_cl.me_client_id;
-	mei_hdr->msg_complete = 1;
-	mei_hdr->reserved = 0;
+	hdr.host_addr = dev->wd_cl.host_client_id;
+	hdr.me_addr = dev->wd_cl.me_client_id;
+	hdr.msg_complete = 1;
+	hdr.reserved = 0;
 
 	if (!memcmp(dev->wd_data, mei_start_wd_params, MEI_WD_HDR_SIZE))
-		mei_hdr->length = MEI_WD_START_MSG_SIZE;
+		hdr.length = MEI_WD_START_MSG_SIZE;
 	else if (!memcmp(dev->wd_data, mei_stop_wd_params, MEI_WD_HDR_SIZE))
-		mei_hdr->length = MEI_WD_STOP_MSG_SIZE;
+		hdr.length = MEI_WD_STOP_MSG_SIZE;
 	else
 		return -EINVAL;
 
-	return mei_write_message(dev, mei_hdr, dev->wd_data, mei_hdr->length);
+	return mei_write_message(dev, &hdr, dev->wd_data);
 }
 
 /**
@@ -141,16 +153,16 @@
 
 	dev->wd_state = MEI_WD_STOPPING;
 
-	ret = mei_flow_ctrl_creds(dev, &dev->wd_cl);
+	ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
 	if (ret < 0)
 		goto out;
 
-	if (ret && dev->mei_host_buffer_is_empty) {
+	if (ret && dev->hbuf_is_ready) {
 		ret = 0;
-		dev->mei_host_buffer_is_empty = false;
+		dev->hbuf_is_ready = false;
 
 		if (!mei_wd_send(dev)) {
-			ret = mei_flow_ctrl_reduce(dev, &dev->wd_cl);
+			ret = mei_cl_flow_ctrl_reduce(&dev->wd_cl);
 			if (ret)
 				goto out;
 		} else {
@@ -270,10 +282,9 @@
 	dev->wd_state = MEI_WD_RUNNING;
 
 	/* Check if we can send the ping to HW*/
-	if (dev->mei_host_buffer_is_empty &&
-		mei_flow_ctrl_creds(dev, &dev->wd_cl) > 0) {
+	if (dev->hbuf_is_ready && mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
 
-		dev->mei_host_buffer_is_empty = false;
+		dev->hbuf_is_ready = false;
 		dev_dbg(&dev->pdev->dev, "wd: sending ping\n");
 
 		if (mei_wd_send(dev)) {
@@ -282,9 +293,9 @@
 			goto end;
 		}
 
-		if (mei_flow_ctrl_reduce(dev, &dev->wd_cl)) {
+		if (mei_cl_flow_ctrl_reduce(&dev->wd_cl)) {
 			dev_err(&dev->pdev->dev,
-				"wd: mei_flow_ctrl_reduce() failed.\n");
+				"wd: mei_cl_flow_ctrl_reduce() failed.\n");
 			ret = -EIO;
 			goto end;
 		}
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index b90a224..0a14280 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -240,7 +240,8 @@
 	char *ptr;
 	struct st_proto_s *proto;
 	unsigned short payload_len = 0;
-	int len = 0, type = 0;
+	int len = 0;
+	unsigned char type = 0;
 	unsigned char *plen;
 	struct st_data_s *st_gdata = (struct st_data_s *)disc_data;
 	unsigned long flags;
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
new file mode 100644
index 0000000..39c2eca
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -0,0 +1,16 @@
+#
+# VMware VMCI device
+#
+
+config VMWARE_VMCI
+	tristate "VMware VMCI Driver"
+	depends on X86 && PCI
+	help
+	  This is VMware's Virtual Machine Communication Interface.  It enables
+	  high-speed communication between host and guest in a virtual
+	  environment via the VMCI virtual device.
+
+	  If unsure, say N.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmw_vmci.
diff --git a/drivers/misc/vmw_vmci/Makefile b/drivers/misc/vmw_vmci/Makefile
new file mode 100644
index 0000000..4da9893
--- /dev/null
+++ b/drivers/misc/vmw_vmci/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci.o
+vmw_vmci-y += vmci_context.o vmci_datagram.o vmci_doorbell.o \
+	vmci_driver.o vmci_event.o vmci_guest.o vmci_handle_array.o \
+	vmci_host.o vmci_queue_pair.o vmci_resource.o vmci_route.o
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
new file mode 100644
index 0000000..f866a4ba
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -0,0 +1,1214 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+/*
+ * List of current VMCI contexts.  Contexts can be added by
+ * vmci_ctx_create() and removed via vmci_ctx_destroy().
+ * These, along with context lookup, are protected by the
+ * list structure's lock.
+ */
+static struct {
+	struct list_head head;
+	spinlock_t lock; /* Spinlock for context list operations */
+} ctx_list = {
+	.head = LIST_HEAD_INIT(ctx_list.head),
+	.lock = __SPIN_LOCK_UNLOCKED(ctx_list.lock),
+};
+
+/* Used by contexts that did not set up notify flag pointers */
+static bool ctx_dummy_notify;
+
+static void ctx_signal_notify(struct vmci_ctx *context)
+{
+	*context->notify = true;
+}
+
+static void ctx_clear_notify(struct vmci_ctx *context)
+{
+	*context->notify = false;
+}
+
+/*
+ * If nothing requires the attention of the guest, clears both
+ * notify flag and call.
+ */
+static void ctx_clear_notify_call(struct vmci_ctx *context)
+{
+	if (context->pending_datagrams == 0 &&
+	    vmci_handle_arr_get_size(context->pending_doorbell_array) == 0)
+		ctx_clear_notify(context);
+}
+
+/*
+ * Sets the context's notify flag iff datagrams are pending for this
+ * context.  Called from vmci_setup_notify().
+ */
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context)
+{
+	spin_lock(&context->lock);
+	if (context->pending_datagrams)
+		ctx_signal_notify(context);
+	spin_unlock(&context->lock);
+}
+
+/*
+ * Allocates and initializes a VMCI context.
+ */
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 priv_flags,
+				 uintptr_t event_hnd,
+				 int user_version,
+				 const struct cred *cred)
+{
+	struct vmci_ctx *context;
+	int error;
+
+	if (cid == VMCI_INVALID_ID) {
+		pr_devel("Invalid context ID for VMCI context\n");
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS) {
+		pr_devel("Invalid flag (flags=0x%x) for VMCI context\n",
+			 priv_flags);
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	if (user_version == 0) {
+		pr_devel("Invalid user_version %d\n", user_version);
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	context = kzalloc(sizeof(*context), GFP_KERNEL);
+	if (!context) {
+		pr_warn("Failed to allocate memory for VMCI context\n");
+		error = -EINVAL;
+		goto err_out;
+	}
+
+	kref_init(&context->kref);
+	spin_lock_init(&context->lock);
+	INIT_LIST_HEAD(&context->list_item);
+	INIT_LIST_HEAD(&context->datagram_queue);
+	INIT_LIST_HEAD(&context->notifier_list);
+
+	/* Initialize host-specific VMCI context. */
+	init_waitqueue_head(&context->host_context.wait_queue);
+
+	context->queue_pair_array = vmci_handle_arr_create(0);
+	if (!context->queue_pair_array) {
+		error = -ENOMEM;
+		goto err_free_ctx;
+	}
+
+	context->doorbell_array = vmci_handle_arr_create(0);
+	if (!context->doorbell_array) {
+		error = -ENOMEM;
+		goto err_free_qp_array;
+	}
+
+	context->pending_doorbell_array = vmci_handle_arr_create(0);
+	if (!context->pending_doorbell_array) {
+		error = -ENOMEM;
+		goto err_free_db_array;
+	}
+
+	context->user_version = user_version;
+
+	context->priv_flags = priv_flags;
+
+	if (cred)
+		context->cred = get_cred(cred);
+
+	context->notify = &ctx_dummy_notify;
+	context->notify_page = NULL;
+
+	/*
+	 * If we collide with an existing context we generate a new one
+	 * and use it instead. The VMX will determine if regeneration
+	 * is okay. Since there aren't 4B - 16 VMs running on a given
+	 * host, the loop below will terminate.
+	 */
+	spin_lock(&ctx_list.lock);
+
+	while (vmci_ctx_exists(cid)) {
+		/* We reserve the lowest 16 ids for fixed contexts. */
+		cid = max(cid, VMCI_RESERVED_CID_LIMIT - 1) + 1;
+		if (cid == VMCI_INVALID_ID)
+			cid = VMCI_RESERVED_CID_LIMIT;
+	}
+	context->cid = cid;
+
+	list_add_tail_rcu(&context->list_item, &ctx_list.head);
+	spin_unlock(&ctx_list.lock);
+
+	return context;
+
+ err_free_db_array:
+	vmci_handle_arr_destroy(context->doorbell_array);
+ err_free_qp_array:
+	vmci_handle_arr_destroy(context->queue_pair_array);
+ err_free_ctx:
+	kfree(context);
+ err_out:
+	return ERR_PTR(error);
+}
+
+/*
+ * Destroy VMCI context.
+ */
+void vmci_ctx_destroy(struct vmci_ctx *context)
+{
+	spin_lock(&ctx_list.lock);
+	list_del_rcu(&context->list_item);
+	spin_unlock(&ctx_list.lock);
+	synchronize_rcu();
+
+	vmci_ctx_put(context);
+}
+
+/*
+ * Fire notification for all contexts interested in given cid.
+ */
+static int ctx_fire_notification(u32 context_id, u32 priv_flags)
+{
+	u32 i, array_size;
+	struct vmci_ctx *sub_ctx;
+	struct vmci_handle_arr *subscriber_array;
+	struct vmci_handle context_handle =
+		vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+
+	/*
+	 * We create an array to hold the subscribers we find when
+	 * scanning through all contexts.
+	 */
+	subscriber_array = vmci_handle_arr_create(0);
+	if (subscriber_array == NULL)
+		return VMCI_ERROR_NO_MEM;
+
+	/*
+	 * Scan all contexts to find who is interested in being
+	 * notified about given contextID.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(sub_ctx, &ctx_list.head, list_item) {
+		struct vmci_handle_list *node;
+
+		/*
+		 * We only deliver notifications of the removal of
+		 * contexts, if the two contexts are allowed to
+		 * interact.
+		 */
+		if (vmci_deny_interaction(priv_flags, sub_ctx->priv_flags))
+			continue;
+
+		list_for_each_entry_rcu(node, &sub_ctx->notifier_list, node) {
+			if (!vmci_handle_is_equal(node->handle, context_handle))
+				continue;
+
+			vmci_handle_arr_append_entry(&subscriber_array,
+					vmci_make_handle(sub_ctx->cid,
+							 VMCI_EVENT_HANDLER));
+		}
+	}
+	rcu_read_unlock();
+
+	/* Fire event to all subscribers. */
+	array_size = vmci_handle_arr_get_size(subscriber_array);
+	for (i = 0; i < array_size; i++) {
+		int result;
+		struct vmci_event_ctx ev;
+
+		ev.msg.hdr.dst = vmci_handle_arr_get_entry(subscriber_array, i);
+		ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+						  VMCI_CONTEXT_RESOURCE_ID);
+		ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+		ev.msg.event_data.event = VMCI_EVENT_CTX_REMOVED;
+		ev.payload.context_id = context_id;
+
+		result = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+						&ev.msg.hdr, false);
+		if (result < VMCI_SUCCESS) {
+			pr_devel("Failed to enqueue event datagram (type=%d) for context (ID=0x%x)\n",
+				 ev.msg.event_data.event,
+				 ev.msg.hdr.dst.context);
+			/* We continue to enqueue on next subscriber. */
+		}
+	}
+	vmci_handle_arr_destroy(subscriber_array);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Returns the current number of pending datagrams. The call may
+ * also serve as a synchronization point for the datagram queue,
+ * as no enqueue operations can occur concurrently.
+ */
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending)
+{
+	struct vmci_ctx *context;
+
+	context = vmci_ctx_get(cid);
+	if (context == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	spin_lock(&context->lock);
+	if (pending)
+		*pending = context->pending_datagrams;
+	spin_unlock(&context->lock);
+	vmci_ctx_put(context);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Queues a VMCI datagram for the appropriate target VM context.
+ */
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg)
+{
+	struct vmci_datagram_queue_entry *dq_entry;
+	struct vmci_ctx *context;
+	struct vmci_handle dg_src;
+	size_t vmci_dg_size;
+
+	vmci_dg_size = VMCI_DG_SIZE(dg);
+	if (vmci_dg_size > VMCI_MAX_DG_SIZE) {
+		pr_devel("Datagram too large (bytes=%zu)\n", vmci_dg_size);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	/* Get the target VM's VMCI context. */
+	context = vmci_ctx_get(cid);
+	if (!context) {
+		pr_devel("Invalid context (ID=0x%x)\n", cid);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	/* Allocate guest call entry and add it to the target VM's queue. */
+	dq_entry = kmalloc(sizeof(*dq_entry), GFP_KERNEL);
+	if (dq_entry == NULL) {
+		pr_warn("Failed to allocate memory for datagram\n");
+		vmci_ctx_put(context);
+		return VMCI_ERROR_NO_MEM;
+	}
+	dq_entry->dg = dg;
+	dq_entry->dg_size = vmci_dg_size;
+	dg_src = dg->src;
+	INIT_LIST_HEAD(&dq_entry->list_item);
+
+	spin_lock(&context->lock);
+
+	/*
+	 * We put a higher limit on datagrams from the hypervisor.  If
+	 * the pending datagram is not from hypervisor, then we check
+	 * if enqueueing it would exceed the
+	 * VMCI_MAX_DATAGRAM_QUEUE_SIZE limit on the destination.  If
+	 * the pending datagram is from hypervisor, we allow it to be
+	 * queued at the destination side provided we don't reach the
+	 * VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE limit.
+	 */
+	if (context->datagram_queue_size + vmci_dg_size >=
+	    VMCI_MAX_DATAGRAM_QUEUE_SIZE &&
+	    (!vmci_handle_is_equal(dg_src,
+				vmci_make_handle
+				(VMCI_HYPERVISOR_CONTEXT_ID,
+				 VMCI_CONTEXT_RESOURCE_ID)) ||
+	     context->datagram_queue_size + vmci_dg_size >=
+	     VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE)) {
+		spin_unlock(&context->lock);
+		vmci_ctx_put(context);
+		kfree(dq_entry);
+		pr_devel("Context (ID=0x%x) receive queue is full\n", cid);
+		return VMCI_ERROR_NO_RESOURCES;
+	}
+
+	list_add(&dq_entry->list_item, &context->datagram_queue);
+	context->pending_datagrams++;
+	context->datagram_queue_size += vmci_dg_size;
+	ctx_signal_notify(context);
+	wake_up(&context->host_context.wait_queue);
+	spin_unlock(&context->lock);
+	vmci_ctx_put(context);
+
+	return vmci_dg_size;
+}
+
+/*
+ * Verifies whether a context with the specified context ID exists.
+ * FIXME: utility is dubious, as no decisions can be reliably made
+ * using this data, since contexts can appear and disappear at any time.
+ */
+bool vmci_ctx_exists(u32 cid)
+{
+	struct vmci_ctx *context;
+	bool exists = false;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(context, &ctx_list.head, list_item) {
+		if (context->cid == cid) {
+			exists = true;
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+	return exists;
+}
+
+/*
+ * Retrieves VMCI context corresponding to the given cid.
+ */
+struct vmci_ctx *vmci_ctx_get(u32 cid)
+{
+	struct vmci_ctx *c, *context = NULL;
+
+	if (cid == VMCI_INVALID_ID)
+		return NULL;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(c, &ctx_list.head, list_item) {
+		if (c->cid == cid) {
+			/*
+			 * The context owner drops its own reference to the
+			 * context only after removing it from the list and
+			 * waiting for RCU grace period to expire. This
+			 * means that we are not about to increase the
+			 * reference count of something that is in the
+			 * process of being destroyed.
+			 */
+			context = c;
+			kref_get(&context->kref);
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return context;
+}
+
+/*
+ * Deallocates all parts of a context data structure. This
+ * function doesn't lock the context, because it assumes that
+ * the caller holds the last reference to the context.
+ */
+static void ctx_free_ctx(struct kref *kref)
+{
+	struct vmci_ctx *context = container_of(kref, struct vmci_ctx, kref);
+	struct vmci_datagram_queue_entry *dq_entry, *dq_entry_tmp;
+	struct vmci_handle temp_handle;
+	struct vmci_handle_list *notifier, *tmp;
+
+	/*
+	 * Fire event to all contexts interested in knowing this
+	 * context is dying.
+	 */
+	ctx_fire_notification(context->cid, context->priv_flags);
+
+	/*
+	 * Cleanup all queue pair resources attached to context.  If
+	 * the VM dies without cleaning up, this code will make sure
+	 * that no resources are leaked.
+	 */
+	temp_handle = vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+	while (!vmci_handle_is_equal(temp_handle, VMCI_INVALID_HANDLE)) {
+		if (vmci_qp_broker_detach(temp_handle,
+					  context) < VMCI_SUCCESS) {
+			/*
+			 * When vmci_qp_broker_detach() succeeds it
+			 * removes the handle from the array.  If
+			 * detach fails, we must remove the handle
+			 * ourselves.
+			 */
+			vmci_handle_arr_remove_entry(context->queue_pair_array,
+						     temp_handle);
+		}
+		temp_handle =
+		    vmci_handle_arr_get_entry(context->queue_pair_array, 0);
+	}
+
+	/*
+	 * It is fine to destroy this without locking the callQueue, as
+	 * this is the only thread having a reference to the context.
+	 */
+	list_for_each_entry_safe(dq_entry, dq_entry_tmp,
+				 &context->datagram_queue, list_item) {
+		WARN_ON(dq_entry->dg_size != VMCI_DG_SIZE(dq_entry->dg));
+		list_del(&dq_entry->list_item);
+		kfree(dq_entry->dg);
+		kfree(dq_entry);
+	}
+
+	list_for_each_entry_safe(notifier, tmp,
+				 &context->notifier_list, node) {
+		list_del(&notifier->node);
+		kfree(notifier);
+	}
+
+	vmci_handle_arr_destroy(context->queue_pair_array);
+	vmci_handle_arr_destroy(context->doorbell_array);
+	vmci_handle_arr_destroy(context->pending_doorbell_array);
+	vmci_ctx_unset_notify(context);
+	if (context->cred)
+		put_cred(context->cred);
+	kfree(context);
+}
+
+/*
+ * Drops reference to VMCI context. If this is the last reference to
+ * the context it will be deallocated. A context is created with
+ * a reference count of one, and on destroy, it is removed from
+ * the context list before its reference count is decremented. Thus,
+ * if we reach zero, we are sure that nobody else is about to increment
+ * it (they need the entry in the context list for that), and so there
+ * is no need for locking.
+ */
+void vmci_ctx_put(struct vmci_ctx *context)
+{
+	kref_put(&context->kref, ctx_free_ctx);
+}
+
+/*
+ * Dequeues the next datagram and returns it to the caller.
+ * The caller passes in a pointer to the maximum datagram size
+ * it can handle, and the datagram is only dequeued if its size
+ * is less than max_size. If it is larger, max_size is set to
+ * the size of the datagram to give the caller a chance to
+ * set up a larger buffer for the guest call.
+ */
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+			      size_t *max_size,
+			      struct vmci_datagram **dg)
+{
+	struct vmci_datagram_queue_entry *dq_entry;
+	struct list_head *list_item;
+	int rv;
+
+	/* Dequeue the next datagram entry. */
+	spin_lock(&context->lock);
+	if (context->pending_datagrams == 0) {
+		ctx_clear_notify_call(context);
+		spin_unlock(&context->lock);
+		pr_devel("No datagrams pending\n");
+		return VMCI_ERROR_NO_MORE_DATAGRAMS;
+	}
+
+	list_item = context->datagram_queue.next;
+
+	dq_entry =
+	    list_entry(list_item, struct vmci_datagram_queue_entry, list_item);
+
+	/* Check size of caller's buffer. */
+	if (*max_size < dq_entry->dg_size) {
+		*max_size = dq_entry->dg_size;
+		spin_unlock(&context->lock);
+		pr_devel("Caller's buffer should be at least (size=%u bytes)\n",
+			 (u32) *max_size);
+		return VMCI_ERROR_NO_MEM;
+	}
+
+	list_del(list_item);
+	context->pending_datagrams--;
+	context->datagram_queue_size -= dq_entry->dg_size;
+	if (context->pending_datagrams == 0) {
+		ctx_clear_notify_call(context);
+		rv = VMCI_SUCCESS;
+	} else {
+		/*
+		 * Return the size of the next datagram.
+		 */
+		struct vmci_datagram_queue_entry *next_entry;
+
+		list_item = context->datagram_queue.next;
+		next_entry =
+		    list_entry(list_item, struct vmci_datagram_queue_entry,
+			       list_item);
+
+		/*
+		 * The following size_t -> int truncation is fine as
+		 * the maximum size of a (routable) datagram is 68KB.
+		 */
+		rv = (int)next_entry->dg_size;
+	}
+	spin_unlock(&context->lock);
+
+	/* Caller must free datagram. */
+	*dg = dq_entry->dg;
+	dq_entry->dg = NULL;
+	kfree(dq_entry);
+
+	return rv;
+}
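[Editor's illustration, not part of this patch: a minimal sketch of the retry convention described in the comment above, as a hypothetical host-side caller might use it. The buffer limit, helper name, and flow are made up for the example.]

/* Hypothetical caller of vmci_ctx_dequeue_datagram() -- not in this patch. */
static int example_fetch_datagram(struct vmci_ctx *context)
{
	struct vmci_datagram *dg = NULL;
	size_t limit = 4096;	/* example buffer limit, not a VMCI constant */
	int ret;

	ret = vmci_ctx_dequeue_datagram(context, &limit, &dg);
	if (ret == VMCI_ERROR_NO_MEM) {
		/*
		 * 'limit' now holds the actual size of the pending
		 * datagram; a real caller would grow its buffer to at
		 * least that size and retry the dequeue.
		 */
		return ret;
	}
	if (ret < VMCI_SUCCESS)
		return ret;	/* e.g. VMCI_ERROR_NO_MORE_DATAGRAMS */

	/* ... hand 'dg' to the guest ... */

	kfree(dg);	/* the caller owns and must free the datagram */
	return ret;	/* VMCI_SUCCESS, or size of the next pending datagram */
}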
+
+/*
+ * Reverts actions set up by vmci_setup_notify().  Unmaps and unlocks the
+ * page mapped/locked by vmci_setup_notify().
+ */
+void vmci_ctx_unset_notify(struct vmci_ctx *context)
+{
+	struct page *notify_page;
+
+	spin_lock(&context->lock);
+
+	notify_page = context->notify_page;
+	context->notify = &ctx_dummy_notify;
+	context->notify_page = NULL;
+
+	spin_unlock(&context->lock);
+
+	if (notify_page) {
+		kunmap(notify_page);
+		put_page(notify_page);
+	}
+}
+
+/*
+ * Add remote_cid to the list of contexts the current context wants
+ * notifications from/about.
+ */
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid)
+{
+	struct vmci_ctx *context;
+	struct vmci_handle_list *notifier, *n;
+	int result;
+	bool exists = false;
+
+	context = vmci_ctx_get(context_id);
+	if (!context)
+		return VMCI_ERROR_NOT_FOUND;
+
+	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(remote_cid)) {
+		pr_devel("Context removed notifications for other VMs not supported (src=0x%x, remote=0x%x)\n",
+			 context_id, remote_cid);
+		result = VMCI_ERROR_DST_UNREACHABLE;
+		goto out;
+	}
+
+	if (context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+		result = VMCI_ERROR_NO_ACCESS;
+		goto out;
+	}
+
+	notifier = kmalloc(sizeof(struct vmci_handle_list), GFP_KERNEL);
+	if (!notifier) {
+		result = VMCI_ERROR_NO_MEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&notifier->node);
+	notifier->handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+	spin_lock(&context->lock);
+
+	list_for_each_entry(n, &context->notifier_list, node) {
+		if (vmci_handle_is_equal(n->handle, notifier->handle)) {
+			exists = true;
+			break;
+		}
+	}
+
+	if (exists) {
+		kfree(notifier);
+		result = VMCI_ERROR_ALREADY_EXISTS;
+	} else {
+		list_add_tail_rcu(&notifier->node, &context->notifier_list);
+		context->n_notifiers++;
+		result = VMCI_SUCCESS;
+	}
+
+	spin_unlock(&context->lock);
+
+ out:
+	vmci_ctx_put(context);
+	return result;
+}
+
+/*
+ * Remove remote_cid from current context's list of contexts it is
+ * interested in getting notifications from/about.
+ */
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
+{
+	struct vmci_ctx *context;
+	struct vmci_handle_list *notifier, *tmp;
+	struct vmci_handle handle;
+	bool found = false;
+
+	context = vmci_ctx_get(context_id);
+	if (!context)
+		return VMCI_ERROR_NOT_FOUND;
+
+	handle = vmci_make_handle(remote_cid, VMCI_EVENT_HANDLER);
+
+	spin_lock(&context->lock);
+	list_for_each_entry_safe(notifier, tmp,
+				 &context->notifier_list, node) {
+		if (vmci_handle_is_equal(notifier->handle, handle)) {
+			list_del_rcu(&notifier->node);
+			context->n_notifiers--;
+			found = true;
+			break;
+		}
+	}
+	spin_unlock(&context->lock);
+
+	if (found) {
+		synchronize_rcu();
+		kfree(notifier);
+	}
+
+	vmci_ctx_put(context);
+
+	return found ? VMCI_SUCCESS : VMCI_ERROR_NOT_FOUND;
+}
+
+static int vmci_ctx_get_chkpt_notifiers(struct vmci_ctx *context,
+					u32 *buf_size, void **pbuf)
+{
+	u32 *notifiers;
+	size_t data_size;
+	struct vmci_handle_list *entry;
+	int i = 0;
+
+	if (context->n_notifiers == 0) {
+		*buf_size = 0;
+		*pbuf = NULL;
+		return VMCI_SUCCESS;
+	}
+
+	data_size = context->n_notifiers * sizeof(*notifiers);
+	if (*buf_size < data_size) {
+		*buf_size = data_size;
+		return VMCI_ERROR_MORE_DATA;
+	}
+
+	notifiers = kmalloc(data_size, GFP_ATOMIC); /* FIXME: want GFP_KERNEL */
+	if (!notifiers)
+		return VMCI_ERROR_NO_MEM;
+
+	list_for_each_entry(entry, &context->notifier_list, node)
+		notifiers[i++] = entry->handle.context;
+
+	*buf_size = data_size;
+	*pbuf = notifiers;
+	return VMCI_SUCCESS;
+}
+
+static int vmci_ctx_get_chkpt_doorbells(struct vmci_ctx *context,
+					u32 *buf_size, void **pbuf)
+{
+	struct dbell_cpt_state *dbells;
+	size_t n_doorbells;
+	int i;
+
+	n_doorbells = vmci_handle_arr_get_size(context->doorbell_array);
+	if (n_doorbells > 0) {
+		size_t data_size = n_doorbells * sizeof(*dbells);
+		if (*buf_size < data_size) {
+			*buf_size = data_size;
+			return VMCI_ERROR_MORE_DATA;
+		}
+
+		dbells = kmalloc(data_size, GFP_ATOMIC);
+		if (!dbells)
+			return VMCI_ERROR_NO_MEM;
+
+		for (i = 0; i < n_doorbells; i++)
+			dbells[i].handle = vmci_handle_arr_get_entry(
+						context->doorbell_array, i);
+
+		*buf_size = data_size;
+		*pbuf = dbells;
+	} else {
+		*buf_size = 0;
+		*pbuf = NULL;
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Get current context's checkpoint state of given type.
+ */
+int vmci_ctx_get_chkpt_state(u32 context_id,
+			     u32 cpt_type,
+			     u32 *buf_size,
+			     void **pbuf)
+{
+	struct vmci_ctx *context;
+	int result;
+
+	context = vmci_ctx_get(context_id);
+	if (!context)
+		return VMCI_ERROR_NOT_FOUND;
+
+	spin_lock(&context->lock);
+
+	switch (cpt_type) {
+	case VMCI_NOTIFICATION_CPT_STATE:
+		result = vmci_ctx_get_chkpt_notifiers(context, buf_size, pbuf);
+		break;
+
+	case VMCI_WELLKNOWN_CPT_STATE:
+		/*
+		 * For compatibility with VMX'en with VM to VM communication, we
+		 * always return zero wellknown handles.
+		 */
+
+		*buf_size = 0;
+		*pbuf = NULL;
+		result = VMCI_SUCCESS;
+		break;
+
+	case VMCI_DOORBELL_CPT_STATE:
+		result = vmci_ctx_get_chkpt_doorbells(context, buf_size, pbuf);
+		break;
+
+	default:
+		pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+		result = VMCI_ERROR_INVALID_ARGS;
+		break;
+	}
+
+	spin_unlock(&context->lock);
+	vmci_ctx_put(context);
+
+	return result;
+}
+
+/*
+ * Set current context's checkpoint state of given type.
+ */
+int vmci_ctx_set_chkpt_state(u32 context_id,
+			     u32 cpt_type,
+			     u32 buf_size,
+			     void *cpt_buf)
+{
+	u32 i;
+	u32 current_id;
+	int result = VMCI_SUCCESS;
+	u32 num_ids = buf_size / sizeof(u32);
+
+	if (cpt_type == VMCI_WELLKNOWN_CPT_STATE && num_ids > 0) {
+		/*
+		 * We would end up here if VMX with VM to VM communication
+		 * attempts to restore a checkpoint with wellknown handles.
+		 */
+		pr_warn("Attempt to restore checkpoint with obsolete wellknown handles\n");
+		return VMCI_ERROR_OBSOLETE;
+	}
+
+	if (cpt_type != VMCI_NOTIFICATION_CPT_STATE) {
+		pr_devel("Invalid cpt state (type=%d)\n", cpt_type);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	for (i = 0; i < num_ids && result == VMCI_SUCCESS; i++) {
+		current_id = ((u32 *)cpt_buf)[i];
+		result = vmci_ctx_add_notification(context_id, current_id);
+		if (result != VMCI_SUCCESS)
+			break;
+	}
+	if (result != VMCI_SUCCESS)
+		pr_devel("Failed to set cpt state (type=%d) (error=%d)\n",
+			 cpt_type, result);
+
+	return result;
+}
+
+/*
+ * Retrieves the specified context's pending notifications in the
+ * form of a handle array. The handle arrays returned are the
+ * actual data, not a copy, and should not be modified by the
+ * caller. They must be released using
+ * vmci_ctx_rcv_notifications_release.
+ */
+int vmci_ctx_rcv_notifications_get(u32 context_id,
+				   struct vmci_handle_arr **db_handle_array,
+				   struct vmci_handle_arr **qp_handle_array)
+{
+	struct vmci_ctx *context;
+	int result = VMCI_SUCCESS;
+
+	context = vmci_ctx_get(context_id);
+	if (context == NULL)
+		return VMCI_ERROR_NOT_FOUND;
+
+	spin_lock(&context->lock);
+
+	*db_handle_array = context->pending_doorbell_array;
+	context->pending_doorbell_array = vmci_handle_arr_create(0);
+	if (!context->pending_doorbell_array) {
+		context->pending_doorbell_array = *db_handle_array;
+		*db_handle_array = NULL;
+		result = VMCI_ERROR_NO_MEM;
+	}
+	*qp_handle_array = NULL;
+
+	spin_unlock(&context->lock);
+	vmci_ctx_put(context);
+
+	return result;
+}
+
+/*
+ * Releases handle arrays with pending notifications previously
+ * retrieved using vmci_ctx_rcv_notifications_get. If the
+ * notifications were not successfully handed over to the guest,
+ * success must be false.
+ */
+void vmci_ctx_rcv_notifications_release(u32 context_id,
+					struct vmci_handle_arr *db_handle_array,
+					struct vmci_handle_arr *qp_handle_array,
+					bool success)
+{
+	struct vmci_ctx *context = vmci_ctx_get(context_id);
+
+	spin_lock(&context->lock);
+	if (!success) {
+		struct vmci_handle handle;
+
+		/*
+		 * New notifications may have been added while we were not
+		 * holding the context lock, so we transfer any new pending
+		 * doorbell notifications to the old array, and reinstate the
+		 * old array.
+		 */
+
+		handle = vmci_handle_arr_remove_tail(
+					context->pending_doorbell_array);
+		while (!vmci_handle_is_invalid(handle)) {
+			if (!vmci_handle_arr_has_entry(db_handle_array,
+						       handle)) {
+				vmci_handle_arr_append_entry(
+						&db_handle_array, handle);
+			}
+			handle = vmci_handle_arr_remove_tail(
+					context->pending_doorbell_array);
+		}
+		vmci_handle_arr_destroy(context->pending_doorbell_array);
+		context->pending_doorbell_array = db_handle_array;
+		db_handle_array = NULL;
+	} else {
+		ctx_clear_notify_call(context);
+	}
+	spin_unlock(&context->lock);
+	vmci_ctx_put(context);
+
+	if (db_handle_array)
+		vmci_handle_arr_destroy(db_handle_array);
+
+	if (qp_handle_array)
+		vmci_handle_arr_destroy(qp_handle_array);
+}
+
+/*
+ * Registers that a new doorbell handle has been allocated by the
+ * context. Only doorbell handles registered can be notified.
+ */
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle)
+{
+	struct vmci_ctx *context;
+	int result;
+
+	if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	context = vmci_ctx_get(context_id);
+	if (context == NULL)
+		return VMCI_ERROR_NOT_FOUND;
+
+	spin_lock(&context->lock);
+	if (!vmci_handle_arr_has_entry(context->doorbell_array, handle)) {
+		vmci_handle_arr_append_entry(&context->doorbell_array, handle);
+		result = VMCI_SUCCESS;
+	} else {
+		result = VMCI_ERROR_DUPLICATE_ENTRY;
+	}
+
+	spin_unlock(&context->lock);
+	vmci_ctx_put(context);
+
+	return result;
+}
+
+/*
+ * Unregisters a doorbell handle that was previously registered
+ * with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle)
+{
+	struct vmci_ctx *context;
+	struct vmci_handle removed_handle;
+
+	if (context_id == VMCI_INVALID_ID || vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	context = vmci_ctx_get(context_id);
+	if (context == NULL)
+		return VMCI_ERROR_NOT_FOUND;
+
+	spin_lock(&context->lock);
+	removed_handle =
+	    vmci_handle_arr_remove_entry(context->doorbell_array, handle);
+	vmci_handle_arr_remove_entry(context->pending_doorbell_array, handle);
+	spin_unlock(&context->lock);
+
+	vmci_ctx_put(context);
+
+	return vmci_handle_is_invalid(removed_handle) ?
+	    VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Unregisters all doorbell handles that were previously
+ * registered with vmci_ctx_dbell_create.
+ */
+int vmci_ctx_dbell_destroy_all(u32 context_id)
+{
+	struct vmci_ctx *context;
+	struct vmci_handle handle;
+
+	if (context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	context = vmci_ctx_get(context_id);
+	if (context == NULL)
+		return VMCI_ERROR_NOT_FOUND;
+
+	spin_lock(&context->lock);
+	do {
+		struct vmci_handle_arr *arr = context->doorbell_array;
+		handle = vmci_handle_arr_remove_tail(arr);
+	} while (!vmci_handle_is_invalid(handle));
+	do {
+		struct vmci_handle_arr *arr = context->pending_doorbell_array;
+		handle = vmci_handle_arr_remove_tail(arr);
+	} while (!vmci_handle_is_invalid(handle));
+	spin_unlock(&context->lock);
+
+	vmci_ctx_put(context);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Registers a notification of a doorbell handle initiated by the
+ * specified source context. Notifications of doorbells are
+ * subject to the same isolation rules as datagram delivery. To
+ * allow host side senders of notifications a finer granularity
+ * of sender rights than those assigned to the sending context
+ * itself, the host context is required to specify a different
+ * set of privilege flags that will override the privileges of
+ * the source context.
+ */
+int vmci_ctx_notify_dbell(u32 src_cid,
+			  struct vmci_handle handle,
+			  u32 src_priv_flags)
+{
+	struct vmci_ctx *dst_context;
+	int result;
+
+	if (vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/* Get the target VM's VMCI context. */
+	dst_context = vmci_ctx_get(handle.context);
+	if (!dst_context) {
+		pr_devel("Invalid context (ID=0x%x)\n", handle.context);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	if (src_cid != handle.context) {
+		u32 dst_priv_flags;
+
+		if (VMCI_CONTEXT_IS_VM(src_cid) &&
+		    VMCI_CONTEXT_IS_VM(handle.context)) {
+			pr_devel("Doorbell notification from VM to VM not supported (src=0x%x, dst=0x%x)\n",
+				 src_cid, handle.context);
+			result = VMCI_ERROR_DST_UNREACHABLE;
+			goto out;
+		}
+
+		result = vmci_dbell_get_priv_flags(handle, &dst_priv_flags);
+		if (result < VMCI_SUCCESS) {
+			pr_warn("Failed to get privilege flags for destination (handle=0x%x:0x%x)\n",
+				handle.context, handle.resource);
+			goto out;
+		}
+
+		if (src_cid != VMCI_HOST_CONTEXT_ID ||
+		    src_priv_flags == VMCI_NO_PRIVILEGE_FLAGS) {
+			src_priv_flags = vmci_context_get_priv_flags(src_cid);
+		}
+
+		if (vmci_deny_interaction(src_priv_flags, dst_priv_flags)) {
+			result = VMCI_ERROR_NO_ACCESS;
+			goto out;
+		}
+	}
+
+	if (handle.context == VMCI_HOST_CONTEXT_ID) {
+		result = vmci_dbell_host_context_notify(src_cid, handle);
+	} else {
+		spin_lock(&dst_context->lock);
+
+		if (!vmci_handle_arr_has_entry(dst_context->doorbell_array,
+					       handle)) {
+			result = VMCI_ERROR_NOT_FOUND;
+		} else {
+			if (!vmci_handle_arr_has_entry(
+					dst_context->pending_doorbell_array,
+					handle)) {
+				vmci_handle_arr_append_entry(
+					&dst_context->pending_doorbell_array,
+					handle);
+
+				ctx_signal_notify(dst_context);
+				wake_up(&dst_context->host_context.wait_queue);
+
+			}
+			result = VMCI_SUCCESS;
+		}
+		spin_unlock(&dst_context->lock);
+	}
+
+ out:
+	vmci_ctx_put(dst_context);
+
+	return result;
+}
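[Editor's illustration, not part of this patch: a short sketch of the host-side privilege override described in the comment above; the wrapper name and the choice of flags are hypothetical.]

/* Hypothetical host-side sender using the privilege-override path. */
static int example_notify_guest_doorbell(struct vmci_handle db_handle)
{
	/*
	 * A host caller may pass explicit privilege flags; guest callers
	 * pass VMCI_NO_PRIVILEGE_FLAGS, in which case the function looks
	 * up the flags of the sending context itself.
	 */
	return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID, db_handle,
				     VMCI_PRIVILEGE_FLAG_TRUSTED);
}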
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context)
+{
+	return context && context->user_version >= VMCI_VERSION_HOSTQP;
+}
+
+/*
+ * Registers that a new queue pair handle has been allocated by
+ * the context.
+ */
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle)
+{
+	int result;
+
+	if (context == NULL || vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (!vmci_handle_arr_has_entry(context->queue_pair_array, handle)) {
+		vmci_handle_arr_append_entry(&context->queue_pair_array,
+					     handle);
+		result = VMCI_SUCCESS;
+	} else {
+		result = VMCI_ERROR_DUPLICATE_ENTRY;
+	}
+
+	return result;
+}
+
+/*
+ * Unregisters a queue pair handle that was previously registered
+ * with vmci_ctx_qp_create.
+ */
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle)
+{
+	struct vmci_handle hndl;
+
+	if (context == NULL || vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	hndl = vmci_handle_arr_remove_entry(context->queue_pair_array, handle);
+
+	return vmci_handle_is_invalid(hndl) ?
+		VMCI_ERROR_NOT_FOUND : VMCI_SUCCESS;
+}
+
+/*
+ * Determines whether a given queue pair handle is registered
+ * with the given context.
+ */
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle)
+{
+	if (context == NULL || vmci_handle_is_invalid(handle))
+		return false;
+
+	return vmci_handle_arr_has_entry(context->queue_pair_array, handle);
+}
+
+/*
+ * vmci_context_get_priv_flags() - Retrieve privilege flags.
+ * @context_id: The context ID of the VMCI context.
+ *
+ * Retrieves privilege flags of the given VMCI context ID.
+ */
+u32 vmci_context_get_priv_flags(u32 context_id)
+{
+	if (vmci_host_code_active()) {
+		u32 flags;
+		struct vmci_ctx *context;
+
+		context = vmci_ctx_get(context_id);
+		if (!context)
+			return VMCI_LEAST_PRIVILEGE_FLAGS;
+
+		flags = context->priv_flags;
+		vmci_ctx_put(context);
+		return flags;
+	}
+	return VMCI_NO_PRIVILEGE_FLAGS;
+}
+EXPORT_SYMBOL_GPL(vmci_context_get_priv_flags);
+
+/*
+ * vmci_is_context_owner() - Determines whether the user is the context owner
+ * @context_id: The context ID of the VMCI context.
+ * @uid:        The host user id (real kernel value).
+ *
+ * Determines whether a given UID is the owner of given VMCI context.
+ */
+bool vmci_is_context_owner(u32 context_id, kuid_t uid)
+{
+	bool is_owner = false;
+
+	if (vmci_host_code_active()) {
+		struct vmci_ctx *context = vmci_ctx_get(context_id);
+		if (context) {
+			if (context->cred)
+				is_owner = uid_eq(context->cred->uid, uid);
+			vmci_ctx_put(context);
+		}
+	}
+
+	return is_owner;
+}
+EXPORT_SYMBOL_GPL(vmci_is_context_owner);
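[Editor's illustration, not part of this patch: how a hypothetical host-side permission check might combine the two exported helpers above. The bypass-for-trusted rule is an assumption made up for the example, not policy from this series.]

/* Hypothetical permission check built on the exported helpers above. */
static bool example_may_manage_context(u32 cid)
{
	/* In this sketch, trusted contexts skip the ownership check. */
	if (vmci_context_get_priv_flags(cid) & VMCI_PRIVILEGE_FLAG_TRUSTED)
		return true;

	return vmci_is_context_owner(cid, current_uid());
}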
diff --git a/drivers/misc/vmw_vmci/vmci_context.h b/drivers/misc/vmw_vmci/vmci_context.h
new file mode 100644
index 0000000..24a88e6
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_context.h
@@ -0,0 +1,182 @@
+/*
+ * VMware VMCI driver (vmciContext.h)
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_CONTEXT_H_
+#define _VMCI_CONTEXT_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/atomic.h>
+#include <linux/kref.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_datagram.h"
+
+/* Used to determine what checkpoint state to get and set. */
+enum {
+	VMCI_NOTIFICATION_CPT_STATE = 1,
+	VMCI_WELLKNOWN_CPT_STATE    = 2,
+	VMCI_DG_OUT_STATE           = 3,
+	VMCI_DG_IN_STATE            = 4,
+	VMCI_DG_IN_SIZE_STATE       = 5,
+	VMCI_DOORBELL_CPT_STATE     = 6,
+};
+
+/* Host specific struct used for signalling */
+struct vmci_host {
+	wait_queue_head_t wait_queue;
+};
+
+struct vmci_handle_list {
+	struct list_head node;
+	struct vmci_handle handle;
+};
+
+struct vmci_ctx {
+	struct list_head list_item;       /* For global VMCI list. */
+	u32 cid;
+	struct kref kref;
+	struct list_head datagram_queue;  /* Head of per VM queue. */
+	u32 pending_datagrams;
+	size_t datagram_queue_size;	  /* Size of datagram queue in bytes. */
+
+	/*
+	 * Version of the code that created
+	 * this context; e.g., VMX.
+	 */
+	int user_version;
+	spinlock_t lock;  /* Locks callQueue and handle_arrays. */
+
+	/*
+	 * Queue pairs attached to this context.  The array of
+	 * handles for queue pairs is accessed
+	 * from the code for QP API, and there
+	 * it is protected by the QP lock.  It
+	 * is also accessed from the context
+	 * clean up path, which does not
+	 * require a lock.  VMCILock is not
+	 * used to protect the QP array field.
+	 */
+	struct vmci_handle_arr *queue_pair_array;
+
+	/* Doorbells created by context. */
+	struct vmci_handle_arr *doorbell_array;
+
+	/* Doorbells pending for context. */
+	struct vmci_handle_arr *pending_doorbell_array;
+
+	/* Contexts current context is subscribing to. */
+	struct list_head notifier_list;
+	unsigned int n_notifiers;
+
+	struct vmci_host host_context;
+	u32 priv_flags;
+
+	const struct cred *cred;
+	bool *notify;		/* Notify flag pointer - hosted only. */
+	struct page *notify_page;	/* Page backing the notify UVA. */
+};
+
+/* VMCINotifyAddRemoveInfo: Used to add/remove remote context notifications. */
+struct vmci_ctx_info {
+	u32 remote_cid;
+	int result;
+};
+
+/* VMCICptBufInfo: Used to set/get current context's checkpoint state. */
+struct vmci_ctx_chkpt_buf_info {
+	u64 cpt_buf;
+	u32 cpt_type;
+	u32 buf_size;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * VMCINotificationReceiveInfo: Used to receive pending notifications
+ * for doorbells and queue pairs.
+ */
+struct vmci_ctx_notify_recv_info {
+	u64 db_handle_buf_uva;
+	u64 db_handle_buf_size;
+	u64 qp_handle_buf_uva;
+	u64 qp_handle_buf_size;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * Utility function that checks whether two entities are allowed
+ * to interact. If one of them is restricted, the other one must
+ * be trusted.
+ */
+static inline bool vmci_deny_interaction(u32 part_one, u32 part_two)
+{
+	return ((part_one & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+		!(part_two & VMCI_PRIVILEGE_FLAG_TRUSTED)) ||
+	       ((part_two & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+		!(part_one & VMCI_PRIVILEGE_FLAG_TRUSTED));
+}
+
+struct vmci_ctx *vmci_ctx_create(u32 cid, u32 flags,
+				 uintptr_t event_hnd, int version,
+				 const struct cred *cred);
+void vmci_ctx_destroy(struct vmci_ctx *context);
+
+bool vmci_ctx_supports_host_qp(struct vmci_ctx *context);
+int vmci_ctx_enqueue_datagram(u32 cid, struct vmci_datagram *dg);
+int vmci_ctx_dequeue_datagram(struct vmci_ctx *context,
+			      size_t *max_size, struct vmci_datagram **dg);
+int vmci_ctx_pending_datagrams(u32 cid, u32 *pending);
+struct vmci_ctx *vmci_ctx_get(u32 cid);
+void vmci_ctx_put(struct vmci_ctx *context);
+bool vmci_ctx_exists(u32 cid);
+
+int vmci_ctx_add_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid);
+int vmci_ctx_get_chkpt_state(u32 context_id, u32 cpt_type,
+			     u32 *num_cids, void **cpt_buf_ptr);
+int vmci_ctx_set_chkpt_state(u32 context_id, u32 cpt_type,
+			     u32 num_cids, void *cpt_buf);
+
+int vmci_ctx_qp_create(struct vmci_ctx *context, struct vmci_handle handle);
+int vmci_ctx_qp_destroy(struct vmci_ctx *context, struct vmci_handle handle);
+bool vmci_ctx_qp_exists(struct vmci_ctx *context, struct vmci_handle handle);
+
+void vmci_ctx_check_signal_notify(struct vmci_ctx *context);
+void vmci_ctx_unset_notify(struct vmci_ctx *context);
+
+int vmci_ctx_dbell_create(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy(u32 context_id, struct vmci_handle handle);
+int vmci_ctx_dbell_destroy_all(u32 context_id);
+int vmci_ctx_notify_dbell(u32 cid, struct vmci_handle handle,
+			  u32 src_priv_flags);
+
+int vmci_ctx_rcv_notifications_get(u32 context_id, struct vmci_handle_arr
+				   **db_handle_array, struct vmci_handle_arr
+				   **qp_handle_array);
+void vmci_ctx_rcv_notifications_release(u32 context_id, struct vmci_handle_arr
+					*db_handle_array, struct vmci_handle_arr
+					*qp_handle_array, bool success);
+
+static inline u32 vmci_ctx_get_id(struct vmci_ctx *context)
+{
+	if (!context)
+		return VMCI_INVALID_ID;
+	return context->cid;
+}
+
+#endif /* _VMCI_CONTEXT_H_ */
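[Editor's illustration, not part of this patch: a rough sketch of the context lifecycle this header exposes, as a hypothetical host-side caller might drive it. The cid value is arbitrary, error handling is abbreviated, and the helper name is made up.]

/* Hypothetical lifecycle sketch for the vmci_ctx API declared above. */
static int example_ctx_lifecycle(struct vmci_datagram *dg)
{
	struct vmci_ctx *context;
	int ret;

	context = vmci_ctx_create(100 /* arbitrary cid */,
				  VMCI_NO_PRIVILEGE_FLAGS,
				  0 /* event_hnd, unused here */,
				  VMCI_VERSION, current_cred());
	if (IS_ERR(context))
		return PTR_ERR(context);

	/* Queueing hands ownership of 'dg' to the context. */
	ret = vmci_ctx_enqueue_datagram(vmci_ctx_get_id(context), dg);

	/*
	 * Other paths would normally look the context up again with
	 * vmci_ctx_get() and release it with vmci_ctx_put().
	 */

	vmci_ctx_destroy(context);	/* drops the creation reference */
	return ret < VMCI_SUCCESS ? ret : VMCI_SUCCESS;
}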
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.c b/drivers/misc/vmw_vmci/vmci_datagram.c
new file mode 100644
index 0000000..ed5c433
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.c
@@ -0,0 +1,500 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bug.h>
+
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * struct datagram_entry describes the datagram entity. It is used for datagram
+ * entities created only on the host.
+ */
+struct datagram_entry {
+	struct vmci_resource resource;
+	u32 flags;
+	bool run_delayed;
+	vmci_datagram_recv_cb recv_cb;
+	void *client_data;
+	u32 priv_flags;
+};
+
+struct delayed_datagram_info {
+	struct datagram_entry *entry;
+	struct vmci_datagram msg;
+	struct work_struct work;
+	bool in_dg_host_queue;
+};
+
+/* Number of in-flight host->host datagrams */
+static atomic_t delayed_dg_host_queue_size = ATOMIC_INIT(0);
+
+/*
+ * Create a datagram entry given a handle pointer.
+ */
+static int dg_create_handle(u32 resource_id,
+			    u32 flags,
+			    u32 priv_flags,
+			    vmci_datagram_recv_cb recv_cb,
+			    void *client_data, struct vmci_handle *out_handle)
+{
+	int result;
+	u32 context_id;
+	struct vmci_handle handle;
+	struct datagram_entry *entry;
+
+	if ((flags & VMCI_FLAG_WELLKNOWN_DG_HND) != 0)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if ((flags & VMCI_FLAG_ANYCID_DG_HND) != 0) {
+		context_id = VMCI_INVALID_ID;
+	} else {
+		context_id = vmci_get_context_id();
+		if (context_id == VMCI_INVALID_ID)
+			return VMCI_ERROR_NO_RESOURCES;
+	}
+
+	handle = vmci_make_handle(context_id, resource_id);
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		pr_warn("Failed allocating memory for datagram entry\n");
+		return VMCI_ERROR_NO_MEM;
+	}
+
+	entry->run_delayed = (flags & VMCI_FLAG_DG_DELAYED_CB) ? true : false;
+	entry->flags = flags;
+	entry->recv_cb = recv_cb;
+	entry->client_data = client_data;
+	entry->priv_flags = priv_flags;
+
+	/* Make datagram resource live. */
+	result = vmci_resource_add(&entry->resource,
+				   VMCI_RESOURCE_TYPE_DATAGRAM,
+				   handle);
+	if (result != VMCI_SUCCESS) {
+		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+			handle.context, handle.resource, result);
+		kfree(entry);
+		return result;
+	}
+
+	*out_handle = vmci_resource_handle(&entry->resource);
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Internal utility function with the same purpose as
+ * vmci_datagram_get_priv_flags that also takes a context_id.
+ */
+static int vmci_datagram_get_priv_flags(u32 context_id,
+					struct vmci_handle handle,
+					u32 *priv_flags)
+{
+	if (context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (context_id == VMCI_HOST_CONTEXT_ID) {
+		struct datagram_entry *src_entry;
+		struct vmci_resource *resource;
+
+		resource = vmci_resource_by_handle(handle,
+						   VMCI_RESOURCE_TYPE_DATAGRAM);
+		if (!resource)
+			return VMCI_ERROR_INVALID_ARGS;
+
+		src_entry = container_of(resource, struct datagram_entry,
+					 resource);
+		*priv_flags = src_entry->priv_flags;
+		vmci_resource_put(resource);
+	} else if (context_id == VMCI_HYPERVISOR_CONTEXT_ID)
+		*priv_flags = VMCI_MAX_PRIVILEGE_FLAGS;
+	else
+		*priv_flags = vmci_context_get_priv_flags(context_id);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dg_delayed_dispatch(struct work_struct *work)
+{
+	struct delayed_datagram_info *dg_info =
+			container_of(work, struct delayed_datagram_info, work);
+
+	dg_info->entry->recv_cb(dg_info->entry->client_data, &dg_info->msg);
+
+	vmci_resource_put(&dg_info->entry->resource);
+
+	if (dg_info->in_dg_host_queue)
+		atomic_dec(&delayed_dg_host_queue_size);
+
+	kfree(dg_info);
+}
+
+/*
+ * Dispatch datagram as a host, to the host, or other vm context. This
+ * function cannot dispatch to hypervisor context handlers. This should
+ * have been handled before we get here by vmci_datagram_dispatch.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_host(u32 context_id, struct vmci_datagram *dg)
+{
+	int retval;
+	size_t dg_size;
+	u32 src_priv_flags;
+
+	dg_size = VMCI_DG_SIZE(dg);
+
+	/* Host cannot send to the hypervisor. */
+	if (dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID)
+		return VMCI_ERROR_DST_UNREACHABLE;
+
+	/* Check that source handle matches sending context. */
+	if (dg->src.context != context_id) {
+		pr_devel("Sender context (ID=0x%x) is not owner of src datagram entry (handle=0x%x:0x%x)\n",
+			 context_id, dg->src.context, dg->src.resource);
+		return VMCI_ERROR_NO_ACCESS;
+	}
+
+	/* Get hold of privileges of sending endpoint. */
+	retval = vmci_datagram_get_priv_flags(context_id, dg->src,
+					      &src_priv_flags);
+	if (retval != VMCI_SUCCESS) {
+		pr_warn("Couldn't get privileges (handle=0x%x:0x%x)\n",
+			dg->src.context, dg->src.resource);
+		return retval;
+	}
+
+	/* Determine if we should route to host or guest destination. */
+	if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
+		/* Route to host datagram entry. */
+		struct datagram_entry *dst_entry;
+		struct vmci_resource *resource;
+
+		if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+		    dg->dst.resource == VMCI_EVENT_HANDLER) {
+			return vmci_event_dispatch(dg);
+		}
+
+		resource = vmci_resource_by_handle(dg->dst,
+						   VMCI_RESOURCE_TYPE_DATAGRAM);
+		if (!resource) {
+			pr_devel("Sending to invalid destination (handle=0x%x:0x%x)\n",
+				 dg->dst.context, dg->dst.resource);
+			return VMCI_ERROR_INVALID_RESOURCE;
+		}
+		dst_entry = container_of(resource, struct datagram_entry,
+					 resource);
+		if (vmci_deny_interaction(src_priv_flags,
+					  dst_entry->priv_flags)) {
+			vmci_resource_put(resource);
+			return VMCI_ERROR_NO_ACCESS;
+		}
+
+		/*
+		 * If a VMCI datagram destined for the host is also sent by the
+		 * host, we always run it delayed. This ensures that no locks
+		 * are held when the datagram callback runs.
+		 */
+		if (dst_entry->run_delayed ||
+		    dg->src.context == VMCI_HOST_CONTEXT_ID) {
+			struct delayed_datagram_info *dg_info;
+
+			if (atomic_add_return(1, &delayed_dg_host_queue_size)
+			    == VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
+				atomic_dec(&delayed_dg_host_queue_size);
+				vmci_resource_put(resource);
+				return VMCI_ERROR_NO_MEM;
+			}
+
+			dg_info = kmalloc(sizeof(*dg_info) +
+				    (size_t) dg->payload_size, GFP_ATOMIC);
+			if (!dg_info) {
+				atomic_dec(&delayed_dg_host_queue_size);
+				vmci_resource_put(resource);
+				return VMCI_ERROR_NO_MEM;
+			}
+
+			dg_info->in_dg_host_queue = true;
+			dg_info->entry = dst_entry;
+			memcpy(&dg_info->msg, dg, dg_size);
+
+			INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+			schedule_work(&dg_info->work);
+			retval = VMCI_SUCCESS;
+
+		} else {
+			retval = dst_entry->recv_cb(dst_entry->client_data, dg);
+			vmci_resource_put(resource);
+			if (retval < VMCI_SUCCESS)
+				return retval;
+		}
+	} else {
+		/* Route to destination VM context. */
+		struct vmci_datagram *new_dg;
+
+		if (context_id != dg->dst.context) {
+			if (vmci_deny_interaction(src_priv_flags,
+						  vmci_context_get_priv_flags
+						  (dg->dst.context))) {
+				return VMCI_ERROR_NO_ACCESS;
+			} else if (VMCI_CONTEXT_IS_VM(context_id)) {
+				/*
+				 * If the sending context is a VM, it
+				 * cannot reach another VM.
+				 */
+
+				pr_devel("Datagram communication between VMs not supported (src=0x%x, dst=0x%x)\n",
+					 context_id, dg->dst.context);
+				return VMCI_ERROR_DST_UNREACHABLE;
+			}
+		}
+
+		/* We make a copy to enqueue. */
+		new_dg = kmalloc(dg_size, GFP_KERNEL);
+		if (new_dg == NULL)
+			return VMCI_ERROR_NO_MEM;
+
+		memcpy(new_dg, dg, dg_size);
+		retval = vmci_ctx_enqueue_datagram(dg->dst.context, new_dg);
+		if (retval < VMCI_SUCCESS) {
+			kfree(new_dg);
+			return retval;
+		}
+	}
+
+	/*
+	 * We currently truncate the size to signed 32 bits. This doesn't
+	 * matter for this handler as it only supports 4KB messages.
+	 */
+	return (int)dg_size;
+}
+
+/*
+ * Dispatch datagram as a guest, down through the VMX and potentially to
+ * the host.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+static int dg_dispatch_as_guest(struct vmci_datagram *dg)
+{
+	int retval;
+	struct vmci_resource *resource;
+
+	resource = vmci_resource_by_handle(dg->src,
+					   VMCI_RESOURCE_TYPE_DATAGRAM);
+	if (!resource)
+		return VMCI_ERROR_NO_HANDLE;
+
+	retval = vmci_send_datagram(dg);
+	vmci_resource_put(resource);
+	return retval;
+}
+
+/*
+ * Dispatch datagram.  This will determine the routing for the datagram
+ * and dispatch it accordingly.
+ * Returns number of bytes sent on success, error code otherwise.
+ */
+int vmci_datagram_dispatch(u32 context_id,
+			   struct vmci_datagram *dg, bool from_guest)
+{
+	int retval;
+	enum vmci_route route;
+
+	BUILD_BUG_ON(sizeof(struct vmci_datagram) != 24);
+
+	if (VMCI_DG_SIZE(dg) > VMCI_MAX_DG_SIZE) {
+		pr_devel("Payload (size=%llu bytes) too big to send\n",
+			 (unsigned long long)dg->payload_size);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	retval = vmci_route(&dg->src, &dg->dst, from_guest, &route);
+	if (retval < VMCI_SUCCESS) {
+		pr_devel("Failed to route datagram (src=0x%x, dst=0x%x, err=%d)\n",
+			 dg->src.context, dg->dst.context, retval);
+		return retval;
+	}
+
+	if (VMCI_ROUTE_AS_HOST == route) {
+		if (VMCI_INVALID_ID == context_id)
+			context_id = VMCI_HOST_CONTEXT_ID;
+		return dg_dispatch_as_host(context_id, dg);
+	}
+
+	if (VMCI_ROUTE_AS_GUEST == route)
+		return dg_dispatch_as_guest(dg);
+
+	pr_warn("Unknown route (%d) for datagram\n", route);
+	return VMCI_ERROR_DST_UNREACHABLE;
+}
+
+/*
+ * Invoke the handler for the given datagram.  This is intended to be
+ * called only when acting as a guest and receiving a datagram from the
+ * virtual device.
+ */
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg)
+{
+	struct vmci_resource *resource;
+	struct datagram_entry *dst_entry;
+
+	resource = vmci_resource_by_handle(dg->dst,
+					   VMCI_RESOURCE_TYPE_DATAGRAM);
+	if (!resource) {
+		pr_devel("destination (handle=0x%x:0x%x) doesn't exist\n",
+			 dg->dst.context, dg->dst.resource);
+		return VMCI_ERROR_NO_HANDLE;
+	}
+
+	dst_entry = container_of(resource, struct datagram_entry, resource);
+	if (dst_entry->run_delayed) {
+		struct delayed_datagram_info *dg_info;
+
+		dg_info = kmalloc(sizeof(*dg_info) + (size_t)dg->payload_size,
+				  GFP_ATOMIC);
+		if (!dg_info) {
+			vmci_resource_put(resource);
+			return VMCI_ERROR_NO_MEM;
+		}
+
+		dg_info->in_dg_host_queue = false;
+		dg_info->entry = dst_entry;
+		memcpy(&dg_info->msg, dg, VMCI_DG_SIZE(dg));
+
+		INIT_WORK(&dg_info->work, dg_delayed_dispatch);
+		schedule_work(&dg_info->work);
+	} else {
+		dst_entry->recv_cb(dst_entry->client_data, dg);
+		vmci_resource_put(resource);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_datagram_create_handle_priv() - Create host context datagram endpoint
+ * @resource_id:        The resource ID.
+ * @flags:      Datagram Flags.
+ * @priv_flags: Privilege Flags.
+ * @recv_cb:    Callback when receiving datagrams.
+ * @client_data:        Opaque pointer handed back to @recv_cb.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to it.
+ */
+int vmci_datagram_create_handle_priv(u32 resource_id,
+				     u32 flags,
+				     u32 priv_flags,
+				     vmci_datagram_recv_cb recv_cb,
+				     void *client_data,
+				     struct vmci_handle *out_handle)
+{
+	if (out_handle == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (recv_cb == NULL) {
+		pr_devel("Client callback needed when creating datagram\n");
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	if (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	return dg_create_handle(resource_id, flags, priv_flags, recv_cb,
+				client_data, out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle_priv);
+
+/*
+ * vmci_datagram_create_handle() - Create host context datagram endpoint
+ * @resource_id:        Resource ID.
+ * @flags:      Datagram Flags.
+ * @recv_cb:    Callback when receiving datagrams.
+ * @client_data: Opaque pointer handed back to @recv_cb.
+ * @out_handle: vmci_handle that is populated as a result of this function.
+ *
+ * Creates a host context datagram endpoint and returns a handle to
+ * it.  Same as vmci_datagram_create_handle_priv without the privilege
+ * flags argument.
+ */
+int vmci_datagram_create_handle(u32 resource_id,
+				u32 flags,
+				vmci_datagram_recv_cb recv_cb,
+				void *client_data,
+				struct vmci_handle *out_handle)
+{
+	return vmci_datagram_create_handle_priv(
+		resource_id, flags,
+		VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+		recv_cb, client_data,
+		out_handle);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_create_handle);
+
+/*
+ * vmci_datagram_destroy_handle() - Destroys datagram handle
+ * @handle:     vmci_handle to be destroyed and reaped.
+ *
+ * Use this function to destroy any datagram handles created by
+ * the vmci_datagram_create_handle{,_priv} functions.
+ */
+int vmci_datagram_destroy_handle(struct vmci_handle handle)
+{
+	struct datagram_entry *entry;
+	struct vmci_resource *resource;
+
+	resource = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DATAGRAM);
+	if (!resource) {
+		pr_devel("Failed to destroy datagram (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	entry = container_of(resource, struct datagram_entry, resource);
+
+	vmci_resource_put(&entry->resource);
+	vmci_resource_remove(&entry->resource);
+	kfree(entry);
+
+	return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_destroy_handle);
+
+/*
+ * vmci_datagram_send() - Send a datagram
+ * @msg:        The datagram to send.
+ *
+ * Sends the provided datagram on its merry way.
+ */
+int vmci_datagram_send(struct vmci_datagram *msg)
+{
+	if (msg == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	return vmci_datagram_dispatch(VMCI_INVALID_ID, msg, false);
+}
+EXPORT_SYMBOL_GPL(vmci_datagram_send);
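As a rough illustration of the exported datagram API above (not part of this patch), a kernel client might pair a private endpoint with a send roughly as follows; the peer handle, payload layout and zero flags value are assumptions made purely for this sketch:

	#include <linux/vmw_vmci_defs.h>
	#include <linux/vmw_vmci_api.h>
	#include <linux/printk.h>

	/* Hypothetical receive callback; runs in atomic context since no
	 * delayed-callback flag is requested at creation time. */
	static int example_recv_cb(void *client_data, struct vmci_datagram *dg)
	{
		pr_info("example: received %llu payload bytes\n",
			(unsigned long long)dg->payload_size);
		return 0;
	}

	static int example_dg_ping(struct vmci_handle peer)
	{
		struct vmci_handle handle;
		struct {
			struct vmci_datagram hdr;
			u8 payload[4];
		} msg = { };
		int result;

		/* VMCI_INVALID_ID lets the resource code pick a free resource ID. */
		result = vmci_datagram_create_handle(VMCI_INVALID_ID, 0 /* no flags */,
						     example_recv_cb, NULL, &handle);
		if (result < VMCI_SUCCESS)
			return result;

		msg.hdr.dst = peer;		/* assumed destination endpoint */
		msg.hdr.src = handle;
		msg.hdr.payload_size = sizeof(msg.payload);

		result = vmci_datagram_send(&msg.hdr);

		vmci_datagram_destroy_handle(handle);
		return result;
	}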
diff --git a/drivers/misc/vmw_vmci/vmci_datagram.h b/drivers/misc/vmw_vmci/vmci_datagram.h
new file mode 100644
index 0000000..eb4aab7
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_datagram.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DATAGRAM_H_
+#define _VMCI_DATAGRAM_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include "vmci_context.h"
+
+#define VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE 256
+
+/*
+ * The struct vmci_datagram_queue_entry is a queue header for the in-kernel VMCI
+ * datagram queues. It is allocated in non-paged memory, as the
+ * content is accessed while holding a spinlock. The pending datagram
+ * itself may be allocated from paged memory. We shadow the size of
+ * the datagram in the non-paged queue entry as this size is used
+ * while holding the same spinlock as above.
+ */
+struct vmci_datagram_queue_entry {
+	struct list_head list_item;	/* For queuing. */
+	size_t dg_size;		/* Size of datagram. */
+	struct vmci_datagram *dg;	/* Pending datagram. */
+};
+
+/* VMCIDatagramSendRecvInfo */
+struct vmci_datagram_snd_rcv_info {
+	u64 addr;
+	u32 len;
+	s32 result;
+};
+
+/* Datagram API for non-public use. */
+int vmci_datagram_dispatch(u32 context_id, struct vmci_datagram *dg,
+			   bool from_guest);
+int vmci_datagram_invoke_guest_handler(struct vmci_datagram *dg);
+
+#endif /* _VMCI_DATAGRAM_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
new file mode 100644
index 0000000..c3e8397
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -0,0 +1,604 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/completion.h>
+#include <linux/hash.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+
+#define VMCI_DOORBELL_INDEX_BITS	6
+#define VMCI_DOORBELL_INDEX_TABLE_SIZE	(1 << VMCI_DOORBELL_INDEX_BITS)
+#define VMCI_DOORBELL_HASH(_idx)	hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
+
+/*
+ * A dbell_entry describes a doorbell notification handle allocated by the
+ * host.
+ */
+struct dbell_entry {
+	struct vmci_resource resource;
+	struct hlist_node node;
+	struct work_struct work;
+	vmci_callback notify_cb;
+	void *client_data;
+	u32 idx;
+	u32 priv_flags;
+	bool run_delayed;
+	atomic_t active;	/* Only used by guest personality */
+};
+
+/* The VMCI index table keeps track of currently registered doorbells. */
+struct dbell_index_table {
+	spinlock_t lock;	/* Index table lock */
+	struct hlist_head entries[VMCI_DOORBELL_INDEX_TABLE_SIZE];
+};
+
+static struct dbell_index_table vmci_doorbell_it = {
+	.lock = __SPIN_LOCK_UNLOCKED(vmci_doorbell_it.lock),
+};
+
+/*
+ * The max_notify_idx is one larger than the currently known bitmap index in
+ * use, and is used to determine how much of the bitmap needs to be scanned.
+ */
+static u32 max_notify_idx;
+
+/*
+ * The notify_idx_count is used for determining whether there are free entries
+ * within the bitmap (if notify_idx_count + 1 < max_notify_idx).
+ */
+static u32 notify_idx_count;
+
+/*
+ * The last_notify_idx_reserved is used to track the last index handed out - in
+ * the case where multiple handles share a notification index, we hand out
+ * indexes round robin based on last_notify_idx_reserved.
+ */
+static u32 last_notify_idx_reserved;
+
+/* This is a one-entry cache used by the index allocation. */
+static u32 last_notify_idx_released = PAGE_SIZE;
+
+
+/*
+ * Utility function that retrieves the privilege flags associated
+ * with a given doorbell handle. For guest endpoints, the
+ * privileges are determined by the context ID, but for host
+ * endpoints privileges are associated with the complete
+ * handle. Hypervisor endpoints are not yet supported.
+ */
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags)
+{
+	if (priv_flags == NULL || handle.context == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (handle.context == VMCI_HOST_CONTEXT_ID) {
+		struct dbell_entry *entry;
+		struct vmci_resource *resource;
+
+		resource = vmci_resource_by_handle(handle,
+						   VMCI_RESOURCE_TYPE_DOORBELL);
+		if (!resource)
+			return VMCI_ERROR_NOT_FOUND;
+
+		entry = container_of(resource, struct dbell_entry, resource);
+		*priv_flags = entry->priv_flags;
+		vmci_resource_put(resource);
+	} else if (handle.context == VMCI_HYPERVISOR_CONTEXT_ID) {
+		/*
+		 * Hypervisor endpoints for notifications are not
+		 * supported (yet).
+		 */
+		return VMCI_ERROR_INVALID_ARGS;
+	} else {
+		*priv_flags = vmci_context_get_priv_flags(handle.context);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Find doorbell entry by bitmap index.
+ */
+static struct dbell_entry *dbell_index_table_find(u32 idx)
+{
+	u32 bucket = VMCI_DOORBELL_HASH(idx);
+	struct dbell_entry *dbell;
+	struct hlist_node *node;
+
+	hlist_for_each_entry(dbell, node, &vmci_doorbell_it.entries[bucket],
+			     node) {
+		if (idx == dbell->idx)
+			return dbell;
+	}
+
+	return NULL;
+}
+
+/*
+ * Add the given entry to the index table.  This will take a reference to the
+ * entry's resource so that the entry is not deleted before it is removed from
+ * the table.
+ */
+static void dbell_index_table_add(struct dbell_entry *entry)
+{
+	u32 bucket;
+	u32 new_notify_idx;
+
+	vmci_resource_get(&entry->resource);
+
+	spin_lock_bh(&vmci_doorbell_it.lock);
+
+	/*
+	 * Below we try to allocate an index in the notification
+	 * bitmap with "not too much" sharing between resources. If we
+	 * use less than the full bitmap, we either add to the end if
+	 * there are no unused flags within the currently used area,
+	 * or we search for unused ones. If we use the full bitmap, we
+	 * allocate the index round robin.
+	 */
+	if (max_notify_idx < PAGE_SIZE || notify_idx_count < PAGE_SIZE) {
+		if (last_notify_idx_released < max_notify_idx &&
+		    !dbell_index_table_find(last_notify_idx_released)) {
+			new_notify_idx = last_notify_idx_released;
+			last_notify_idx_released = PAGE_SIZE;
+		} else {
+			bool reused = false;
+			new_notify_idx = last_notify_idx_reserved;
+			if (notify_idx_count + 1 < max_notify_idx) {
+				do {
+					if (!dbell_index_table_find
+					    (new_notify_idx)) {
+						reused = true;
+						break;
+					}
+					new_notify_idx = (new_notify_idx + 1) %
+					    max_notify_idx;
+				} while (new_notify_idx !=
+					 last_notify_idx_released);
+			}
+			if (!reused) {
+				new_notify_idx = max_notify_idx;
+				max_notify_idx++;
+			}
+		}
+	} else {
+		new_notify_idx = (last_notify_idx_reserved + 1) % PAGE_SIZE;
+	}
+
+	last_notify_idx_reserved = new_notify_idx;
+	notify_idx_count++;
+
+	entry->idx = new_notify_idx;
+	bucket = VMCI_DOORBELL_HASH(entry->idx);
+	hlist_add_head(&entry->node, &vmci_doorbell_it.entries[bucket]);
+
+	spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Remove the given entry from the index table.  This will release() the
+ * entry's resource.
+ */
+static void dbell_index_table_remove(struct dbell_entry *entry)
+{
+	spin_lock_bh(&vmci_doorbell_it.lock);
+
+	hlist_del_init(&entry->node);
+
+	notify_idx_count--;
+	if (entry->idx == max_notify_idx - 1) {
+		/*
+		 * If we delete an entry with the maximum known
+		 * notification index, we take the opportunity to
+		 * prune the current max. As there might be other
+		 * unused indices immediately below, we lower the
+		 * maximum until we hit an index in use.
+		 */
+		while (max_notify_idx > 0 &&
+		       !dbell_index_table_find(max_notify_idx - 1))
+			max_notify_idx--;
+	}
+
+	last_notify_idx_released = entry->idx;
+
+	spin_unlock_bh(&vmci_doorbell_it.lock);
+
+	vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Creates a link between the given doorbell handle and the given
+ * index in the bitmap in the device backend. A notification state
+ * is created in the hypervisor.
+ */
+static int dbell_link(struct vmci_handle handle, u32 notify_idx)
+{
+	struct vmci_doorbell_link_msg link_msg;
+
+	link_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					    VMCI_DOORBELL_LINK);
+	link_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	link_msg.hdr.payload_size = sizeof(link_msg) - VMCI_DG_HEADERSIZE;
+	link_msg.handle = handle;
+	link_msg.notify_idx = notify_idx;
+
+	return vmci_send_datagram(&link_msg.hdr);
+}
+
+/*
+ * Unlinks the given doorbell handle from an index in the bitmap in
+ * the device backend. The notification state is destroyed in the hypervisor.
+ */
+static int dbell_unlink(struct vmci_handle handle)
+{
+	struct vmci_doorbell_unlink_msg unlink_msg;
+
+	unlink_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					      VMCI_DOORBELL_UNLINK);
+	unlink_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	unlink_msg.hdr.payload_size = sizeof(unlink_msg) - VMCI_DG_HEADERSIZE;
+	unlink_msg.handle = handle;
+
+	return vmci_send_datagram(&unlink_msg.hdr);
+}
+
+/*
+ * Notify another guest or the host.  We send a datagram down to the
+ * host via the hypervisor with the notification info.
+ */
+static int dbell_notify_as_guest(struct vmci_handle handle, u32 priv_flags)
+{
+	struct vmci_doorbell_notify_msg notify_msg;
+
+	notify_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					      VMCI_DOORBELL_NOTIFY);
+	notify_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	notify_msg.hdr.payload_size = sizeof(notify_msg) - VMCI_DG_HEADERSIZE;
+	notify_msg.handle = handle;
+
+	return vmci_send_datagram(&notify_msg.hdr);
+}
+
+/*
+ * Calls the specified callback in a delayed context.
+ */
+static void dbell_delayed_dispatch(struct work_struct *work)
+{
+	struct dbell_entry *entry = container_of(work,
+						 struct dbell_entry, work);
+
+	entry->notify_cb(entry->client_data);
+	vmci_resource_put(&entry->resource);
+}
+
+/*
+ * Dispatches a doorbell notification to the host context.
+ */
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
+{
+	struct dbell_entry *entry;
+	struct vmci_resource *resource;
+
+	if (vmci_handle_is_invalid(handle)) {
+		pr_devel("Notifying an invalid doorbell (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	resource = vmci_resource_by_handle(handle,
+					   VMCI_RESOURCE_TYPE_DOORBELL);
+	if (!resource) {
+		pr_devel("Notifying an unknown doorbell (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	entry = container_of(resource, struct dbell_entry, resource);
+	if (entry->run_delayed) {
+		schedule_work(&entry->work);
+	} else {
+		entry->notify_cb(entry->client_data);
+		vmci_resource_put(resource);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Register the notification bitmap with the host.
+ */
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+{
+	int result;
+	struct vmci_notify_bm_set_msg bitmap_set_msg;
+
+	bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+						  VMCI_SET_NOTIFY_BITMAP);
+	bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
+	    VMCI_DG_HEADERSIZE;
+	bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+
+	result = vmci_send_datagram(&bitmap_set_msg.hdr);
+	if (result != VMCI_SUCCESS) {
+		pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+			 bitmap_ppn, result);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Executes or schedules the handlers for a given notify index.
+ */
+static void dbell_fire_entries(u32 notify_idx)
+{
+	u32 bucket = VMCI_DOORBELL_HASH(notify_idx);
+	struct dbell_entry *dbell;
+	struct hlist_node *node;
+
+	spin_lock_bh(&vmci_doorbell_it.lock);
+
+	hlist_for_each_entry(dbell, node,
+			     &vmci_doorbell_it.entries[bucket], node) {
+		if (dbell->idx == notify_idx &&
+		    atomic_read(&dbell->active) == 1) {
+			if (dbell->run_delayed) {
+				vmci_resource_get(&dbell->resource);
+				schedule_work(&dbell->work);
+			} else {
+				dbell->notify_cb(dbell->client_data);
+			}
+		}
+	}
+
+	spin_unlock_bh(&vmci_doorbell_it.lock);
+}
+
+/*
+ * Scans the notification bitmap, collects pending notifications,
+ * resets the bitmap and invokes appropriate callbacks.
+ */
+void vmci_dbell_scan_notification_entries(u8 *bitmap)
+{
+	u32 idx;
+
+	for (idx = 0; idx < max_notify_idx; idx++) {
+		if (bitmap[idx] & 0x1) {
+			bitmap[idx] &= ~1;
+			dbell_fire_entries(idx);
+		}
+	}
+}
+
+/*
+ * vmci_doorbell_create() - Creates a doorbell
+ * @handle:     A handle used to track the resource.  Can be invalid.
+ * @flags:      Flag that determines context of callback.
+ * @priv_flags: Privilege flags.
+ * @notify_cb:  The callback to be invoked when the doorbell fires.
+ * @client_data:        A parameter to be passed to the callback.
+ *
+ * Creates a doorbell with the given callback. If the handle is
+ * VMCI_INVALID_HANDLE, a free handle will be assigned, if
+ * possible. The callback can be run immediately (potentially with
+ * locks held - the default) or delayed (in a kernel thread) by
+ * specifying the flag VMCI_FLAG_DELAYED_CB. If delayed execution
+ * is selected, a given callback may not be run if the kernel is
+ * unable to allocate memory for the delayed execution (highly
+ * unlikely).
+ */
+int vmci_doorbell_create(struct vmci_handle *handle,
+			 u32 flags,
+			 u32 priv_flags,
+			 vmci_callback notify_cb, void *client_data)
+{
+	struct dbell_entry *entry;
+	struct vmci_handle new_handle;
+	int result;
+
+	if (!handle || !notify_cb || flags & ~VMCI_FLAG_DELAYED_CB ||
+	    priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry == NULL) {
+		pr_warn("Failed allocating memory for datagram entry\n");
+		return VMCI_ERROR_NO_MEM;
+	}
+
+	if (vmci_handle_is_invalid(*handle)) {
+		u32 context_id = vmci_get_context_id();
+
+		/* Let resource code allocate a free ID for us */
+		new_handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+	} else {
+		bool valid_context = false;
+
+		/*
+		 * Validate the handle.  We must do both of the checks below
+		 * because we can be acting as both a host and a guest at the
+		 * same time. We always allow the host context ID, since the
+		 * host functionality is in practice always there with the
+		 * unified driver.
+		 */
+		if (handle->context == VMCI_HOST_CONTEXT_ID ||
+		    (vmci_guest_code_active() &&
+		     vmci_get_context_id() == handle->context)) {
+			valid_context = true;
+		}
+
+		if (!valid_context || handle->resource == VMCI_INVALID_ID) {
+			pr_devel("Invalid argument (handle=0x%x:0x%x)\n",
+				 handle->context, handle->resource);
+			result = VMCI_ERROR_INVALID_ARGS;
+			goto free_mem;
+		}
+
+		new_handle = *handle;
+	}
+
+	entry->idx = 0;
+	INIT_HLIST_NODE(&entry->node);
+	entry->priv_flags = priv_flags;
+	INIT_WORK(&entry->work, dbell_delayed_dispatch);
+	entry->run_delayed = flags & VMCI_FLAG_DELAYED_CB;
+	entry->notify_cb = notify_cb;
+	entry->client_data = client_data;
+	atomic_set(&entry->active, 0);
+
+	result = vmci_resource_add(&entry->resource,
+				   VMCI_RESOURCE_TYPE_DOORBELL,
+				   new_handle);
+	if (result != VMCI_SUCCESS) {
+		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d\n",
+			new_handle.context, new_handle.resource, result);
+		goto free_mem;
+	}
+
+	new_handle = vmci_resource_handle(&entry->resource);
+	if (vmci_guest_code_active()) {
+		dbell_index_table_add(entry);
+		result = dbell_link(new_handle, entry->idx);
+		if (VMCI_SUCCESS != result)
+			goto destroy_resource;
+
+		atomic_set(&entry->active, 1);
+	}
+
+	*handle = new_handle;
+
+	return result;
+
+ destroy_resource:
+	dbell_index_table_remove(entry);
+	vmci_resource_remove(&entry->resource);
+ free_mem:
+	kfree(entry);
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_create);
+
+/*
+ * vmci_doorbell_destroy() - Destroy a doorbell.
+ * @handle:     The handle tracking the resource.
+ *
+ * Destroys a doorbell previously created with vmci_doorbell_create. This
+ * operation may block waiting for a callback to finish.
+ */
+int vmci_doorbell_destroy(struct vmci_handle handle)
+{
+	struct dbell_entry *entry;
+	struct vmci_resource *resource;
+
+	if (vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	resource = vmci_resource_by_handle(handle,
+					   VMCI_RESOURCE_TYPE_DOORBELL);
+	if (!resource) {
+		pr_devel("Failed to destroy doorbell (handle=0x%x:0x%x)\n",
+			 handle.context, handle.resource);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	entry = container_of(resource, struct dbell_entry, resource);
+
+	if (vmci_guest_code_active()) {
+		int result;
+
+		dbell_index_table_remove(entry);
+
+		result = dbell_unlink(handle);
+		if (VMCI_SUCCESS != result) {
+
+			/*
+			 * The only reason this should fail would be
+			 * an inconsistency between guest and
+			 * hypervisor state, where the guest believes
+			 * it has an active registration whereas the
+			 * hypervisor doesn't. One case where this may
+			 * happen is if a doorbell is unregistered
+			 * following a hibernation at a time where the
+			 * doorbell state hasn't been restored on the
+			 * hypervisor side yet. Since the handle has
+			 * now been removed in the guest, we just
+			 * print a warning and return success.
+			 */
+			pr_devel("Unlink of doorbell (handle=0x%x:0x%x) unknown by hypervisor (error=%d)\n",
+				 handle.context, handle.resource, result);
+		}
+	}
+
+	/*
+	 * Now remove the resource from the table.  It might still be in use
+	 * after this, in a callback or still on the delayed work queue.
+	 */
+	vmci_resource_put(&entry->resource);
+	vmci_resource_remove(&entry->resource);
+
+	kfree(entry);
+
+	return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_destroy);
+
+/*
+ * vmci_doorbell_notify() - Ring the doorbell (and hide in the bushes).
+ * @dst:        The handle identifying the doorbell resource
+ * @priv_flags: Privilege flags.
+ *
+ * Generates a notification on the doorbell identified by the
+ * handle. For host side generation of notifications, the caller
+ * can specify what the privilege of the calling side is.
+ */
+int vmci_doorbell_notify(struct vmci_handle dst, u32 priv_flags)
+{
+	int retval;
+	enum vmci_route route;
+	struct vmci_handle src;
+
+	if (vmci_handle_is_invalid(dst) ||
+	    (priv_flags & ~VMCI_PRIVILEGE_ALL_FLAGS))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	src = VMCI_INVALID_HANDLE;
+	retval = vmci_route(&src, &dst, false, &route);
+	if (retval < VMCI_SUCCESS)
+		return retval;
+
+	if (VMCI_ROUTE_AS_HOST == route)
+		return vmci_ctx_notify_dbell(VMCI_HOST_CONTEXT_ID,
+					     dst, priv_flags);
+
+	if (VMCI_ROUTE_AS_GUEST == route)
+		return dbell_notify_as_guest(dst, priv_flags);
+
+	pr_warn("Unknown route (%d) for doorbell\n", route);
+	return VMCI_ERROR_DST_UNREACHABLE;
+}
+EXPORT_SYMBOL_GPL(vmci_doorbell_notify);
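A minimal sketch (not part of this patch) of the doorbell API above, ringing a freshly created doorbell from the same context; the completion-based wait is an assumption used only for illustration:

	#include <linux/vmw_vmci_defs.h>
	#include <linux/vmw_vmci_api.h>
	#include <linux/completion.h>

	/* Runs in a kernel thread because VMCI_FLAG_DELAYED_CB is requested below. */
	static void example_db_fired(void *client_data)
	{
		complete(client_data);
	}

	static int example_doorbell_roundtrip(void)
	{
		struct vmci_handle handle = VMCI_INVALID_HANDLE;	/* let the driver pick */
		struct completion done;
		int result;

		init_completion(&done);

		result = vmci_doorbell_create(&handle, VMCI_FLAG_DELAYED_CB,
					      VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
					      example_db_fired, &done);
		if (result != VMCI_SUCCESS)
			return result;

		/* Ring our own doorbell; normally the handle is handed to a peer. */
		result = vmci_doorbell_notify(handle, VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS);
		if (result == VMCI_SUCCESS)
			wait_for_completion(&done);

		vmci_doorbell_destroy(handle);
		return result;
	}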
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
new file mode 100644
index 0000000..e4c0b17
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -0,0 +1,51 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef VMCI_DOORBELL_H
+#define VMCI_DOORBELL_H
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_driver.h"
+
+/*
+ * VMCINotifyResourceInfo: Used to create and destroy doorbells, and
+ * generate a notification for a doorbell or queue pair.
+ */
+struct vmci_dbell_notify_resource_info {
+	struct vmci_handle handle;
+	u16 resource;
+	u16 action;
+	s32 result;
+};
+
+/*
+ * Structure used for checkpointing the doorbell mappings. It is
+ * written to the checkpoint as is, so changing this structure will
+ * break checkpoint compatibility.
+ */
+struct dbell_cpt_state {
+	struct vmci_handle handle;
+	u64 bitmap_idx;
+};
+
+int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
+int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
+
+bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+void vmci_dbell_scan_notification_entries(u8 *bitmap);
+
+#endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
new file mode 100644
index 0000000..7b3fce2
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -0,0 +1,117 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+static bool vmci_disable_host;
+module_param_named(disable_host, vmci_disable_host, bool, 0);
+MODULE_PARM_DESC(disable_host,
+		 "Disable driver host personality (default=enabled)");
+
+static bool vmci_disable_guest;
+module_param_named(disable_guest, vmci_disable_guest, bool, 0);
+MODULE_PARM_DESC(disable_guest,
+		 "Disable driver guest personality (default=enabled)");
+
+static bool vmci_guest_personality_initialized;
+static bool vmci_host_personality_initialized;
+
+/*
+ * vmci_get_context_id() - Gets the current context ID.
+ *
+ * Returns the current context ID: the guest context ID when the guest
+ * personality is active, otherwise the host context ID (or VMCI_INVALID_ID
+ * if neither personality is active).
+ */
+u32 vmci_get_context_id(void)
+{
+	if (vmci_guest_code_active())
+		return vmci_get_vm_context_id();
+	else if (vmci_host_code_active())
+		return VMCI_HOST_CONTEXT_ID;
+
+	return VMCI_INVALID_ID;
+}
+EXPORT_SYMBOL_GPL(vmci_get_context_id);
+
+static int __init vmci_drv_init(void)
+{
+	int vmci_err;
+	int error;
+
+	vmci_err = vmci_event_init();
+	if (vmci_err < VMCI_SUCCESS) {
+		pr_err("Failed to initialize VMCIEvent (result=%d)\n",
+		       vmci_err);
+		return -EINVAL;
+	}
+
+	if (!vmci_disable_guest) {
+		error = vmci_guest_init();
+		if (error) {
+			pr_warn("Failed to initialize guest personality (err=%d)\n",
+				error);
+		} else {
+			vmci_guest_personality_initialized = true;
+			pr_info("Guest personality initialized and is %s\n",
+				vmci_guest_code_active() ?
+				"active" : "inactive");
+		}
+	}
+
+	if (!vmci_disable_host) {
+		error = vmci_host_init();
+		if (error) {
+			pr_warn("Unable to initialize host personality (err=%d)\n",
+				error);
+		} else {
+			vmci_host_personality_initialized = true;
+			pr_info("Initialized host personality\n");
+		}
+	}
+
+	if (!vmci_guest_personality_initialized &&
+	    !vmci_host_personality_initialized) {
+		vmci_event_exit();
+		return -ENODEV;
+	}
+
+	return 0;
+}
+module_init(vmci_drv_init);
+
+static void __exit vmci_drv_exit(void)
+{
+	if (vmci_guest_personality_initialized)
+		vmci_guest_exit();
+
+	if (vmci_host_personality_initialized)
+		vmci_host_exit();
+
+	vmci_event_exit();
+}
+module_exit(vmci_drv_exit);
+
+MODULE_AUTHOR("VMware, Inc.");
+MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
+MODULE_VERSION("1.0.0.0-k");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
new file mode 100644
index 0000000..f69156a
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -0,0 +1,50 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_DRIVER_H_
+#define _VMCI_DRIVER_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/wait.h>
+
+#include "vmci_queue_pair.h"
+#include "vmci_context.h"
+
+enum vmci_obj_type {
+	VMCIOBJ_VMX_VM = 10,
+	VMCIOBJ_CONTEXT,
+	VMCIOBJ_SOCKET,
+	VMCIOBJ_NOT_SET,
+};
+
+/* For storing VMCI structures in file handles. */
+struct vmci_obj {
+	void *ptr;
+	enum vmci_obj_type type;
+};
+
+u32 vmci_get_context_id(void);
+int vmci_send_datagram(struct vmci_datagram *dg);
+
+int vmci_host_init(void);
+void vmci_host_exit(void);
+bool vmci_host_code_active(void);
+
+int vmci_guest_init(void);
+void vmci_guest_exit(void);
+bool vmci_guest_code_active(void);
+u32 vmci_get_vm_context_id(void);
+
+#endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
new file mode 100644
index 0000000..8449516
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -0,0 +1,224 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define EVENT_MAGIC 0xEABE0000
+#define VMCI_EVENT_MAX_ATTEMPTS 10
+
+struct vmci_subscription {
+	u32 id;
+	u32 event;
+	vmci_event_cb callback;
+	void *callback_data;
+	struct list_head node;	/* on one of subscriber lists */
+};
+
+static struct list_head subscriber_array[VMCI_EVENT_MAX];
+static DEFINE_MUTEX(subscriber_mutex);
+
+int __init vmci_event_init(void)
+{
+	int i;
+
+	for (i = 0; i < VMCI_EVENT_MAX; i++)
+		INIT_LIST_HEAD(&subscriber_array[i]);
+
+	return VMCI_SUCCESS;
+}
+
+void vmci_event_exit(void)
+{
+	int e;
+
+	/* We free all memory at exit. */
+	for (e = 0; e < VMCI_EVENT_MAX; e++) {
+		struct vmci_subscription *cur, *p2;
+		list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
+
+			/*
+			 * We should never get here because all events
+			 * should have been unregistered before we try
+			 * to unload the driver module.
+			 */
+			pr_warn("Unexpected free events occurring\n");
+			list_del(&cur->node);
+			kfree(cur);
+		}
+	}
+}
+
+/*
+ * Find entry. Assumes subscriber_mutex is held.
+ */
+static struct vmci_subscription *event_find(u32 sub_id)
+{
+	int e;
+
+	for (e = 0; e < VMCI_EVENT_MAX; e++) {
+		struct vmci_subscription *cur;
+		list_for_each_entry(cur, &subscriber_array[e], node) {
+			if (cur->id == sub_id)
+				return cur;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Actually delivers the events to the subscribers.
+ * The callback function for each subscriber is invoked.
+ */
+static void event_deliver(struct vmci_event_msg *event_msg)
+{
+	struct vmci_subscription *cur;
+	struct list_head *subscriber_list;
+
+	rcu_read_lock();
+	subscriber_list = &subscriber_array[event_msg->event_data.event];
+	list_for_each_entry_rcu(cur, subscriber_list, node) {
+		cur->callback(cur->id, &event_msg->event_data,
+			      cur->callback_data);
+	}
+	rcu_read_unlock();
+}
+
+/*
+ * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
+ * subscribers for given event.
+ */
+int vmci_event_dispatch(struct vmci_datagram *msg)
+{
+	struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;
+
+	if (msg->payload_size < sizeof(u32) ||
+	    msg->payload_size > sizeof(struct vmci_event_data_max))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (!VMCI_EVENT_VALID(event_msg->event_data.event))
+		return VMCI_ERROR_EVENT_UNKNOWN;
+
+	event_deliver(event_msg);
+	return VMCI_SUCCESS;
+}
+
+/*
+ * vmci_event_subscribe() - Subscribe to a given event.
+ * @event:      The event to subscribe to.
+ * @callback:   The callback to invoke upon the event.
+ * @callback_data:      Data to pass to the callback.
+ * @subscription_id:    ID used to track subscription.  Used with
+ *              vmci_event_unsubscribe()
+ *
+ * Subscribes to the provided event. The callback specified will be
+ * fired from an RCU read-side critical section and therefore must not sleep.
+ */
+int vmci_event_subscribe(u32 event,
+			 vmci_event_cb callback,
+			 void *callback_data,
+			 u32 *new_subscription_id)
+{
+	struct vmci_subscription *sub;
+	int attempts;
+	int retval;
+	bool have_new_id = false;
+
+	if (!new_subscription_id) {
+		pr_devel("%s: Invalid subscription (NULL)\n", __func__);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	if (!VMCI_EVENT_VALID(event) || !callback) {
+		pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
+			 __func__, event, callback, callback_data);
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	sub = kzalloc(sizeof(*sub), GFP_KERNEL);
+	if (!sub)
+		return VMCI_ERROR_NO_MEM;
+
+	sub->id = VMCI_EVENT_MAX;
+	sub->event = event;
+	sub->callback = callback;
+	sub->callback_data = callback_data;
+	INIT_LIST_HEAD(&sub->node);
+
+	mutex_lock(&subscriber_mutex);
+
+	/* Creation of a new event is always allowed. */
+	for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
+		static u32 subscription_id;
+		/*
+		 * We try to get an id a couple of times before
+		 * claiming we are out of resources.
+		 */
+
+		/* Test for duplicate id. */
+		if (!event_find(++subscription_id)) {
+			sub->id = subscription_id;
+			have_new_id = true;
+			break;
+		}
+	}
+
+	if (have_new_id) {
+		list_add_rcu(&sub->node, &subscriber_array[event]);
+		retval = VMCI_SUCCESS;
+	} else {
+		retval = VMCI_ERROR_NO_RESOURCES;
+	}
+
+	mutex_unlock(&subscriber_mutex);
+
+	*new_subscription_id = sub->id;
+	return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_event_subscribe);
+
+/*
+ * vmci_event_unsubscribe() - unsubscribe from an event.
+ * @sub_id:     A subscription ID as provided by vmci_event_subscribe()
+ *
+ * Unsubscribe from the given event. Removes the subscription from the
+ * list and frees it.
+ */
+int vmci_event_unsubscribe(u32 sub_id)
+{
+	struct vmci_subscription *s;
+
+	mutex_lock(&subscriber_mutex);
+	s = event_find(sub_id);
+	if (s)
+		list_del_rcu(&s->node);
+	mutex_unlock(&subscriber_mutex);
+
+	if (!s)
+		return VMCI_ERROR_NOT_FOUND;
+
+	synchronize_rcu();
+	kfree(s);
+
+	return VMCI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
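For reference (again, not part of the patch), a subscriber to the context-ID update event used elsewhere in this series could look roughly like this; the pr_info and the module-scope subscription ID are illustrative assumptions:

	#include <linux/vmw_vmci_defs.h>
	#include <linux/vmw_vmci_api.h>
	#include <linux/printk.h>

	static u32 example_sub_id;

	/* Invoked under rcu_read_lock() by event_deliver(), so it must not sleep. */
	static void example_on_cid_update(u32 sub_id,
					  const struct vmci_event_data *ed,
					  void *client_data)
	{
		const struct vmci_event_payld_ctx *payload =
						vmci_event_data_const_payload(ed);

		pr_info("example: context ID is now 0x%x\n", payload->context_id);
	}

	static int example_subscribe(void)
	{
		return vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					    example_on_cid_update, NULL,
					    &example_sub_id);
	}

	static void example_unsubscribe(void)
	{
		vmci_event_unsubscribe(example_sub_id);
	}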
diff --git a/drivers/misc/vmw_vmci/vmci_event.h b/drivers/misc/vmw_vmci/vmci_event.h
new file mode 100644
index 0000000..7df9b1c
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_event.h
@@ -0,0 +1,25 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMCI_EVENT_H__
+#define __VMCI_EVENT_H__
+
+#include <linux/vmw_vmci_api.h>
+
+int vmci_event_init(void);
+void vmci_event_exit(void);
+int vmci_event_dispatch(struct vmci_datagram *msg);
+
+#endif /*__VMCI_EVENT_H__ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
new file mode 100644
index 0000000..60c0199
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -0,0 +1,759 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define PCI_VENDOR_ID_VMWARE		0x15AD
+#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+static bool vmci_disable_msi;
+module_param_named(disable_msi, vmci_disable_msi, bool, 0);
+MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
+
+static bool vmci_disable_msix;
+module_param_named(disable_msix, vmci_disable_msix, bool, 0);
+MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
+
+static u32 ctx_update_sub_id = VMCI_INVALID_ID;
+static u32 vm_context_id = VMCI_INVALID_ID;
+
+struct vmci_guest_device {
+	struct device *dev;	/* PCI device we are attached to */
+	void __iomem *iobase;
+
+	unsigned int irq;
+	unsigned int intr_type;
+	bool exclusive_vectors;
+	struct msix_entry msix_entries[VMCI_MAX_INTRS];
+
+	struct tasklet_struct datagram_tasklet;
+	struct tasklet_struct bm_tasklet;
+
+	void *data_buffer;
+	void *notification_bitmap;
+};
+
+/* vmci_dev singleton device and supporting data */
+static struct vmci_guest_device *vmci_dev_g;
+static DEFINE_SPINLOCK(vmci_dev_spinlock);
+
+static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
+
+bool vmci_guest_code_active(void)
+{
+	return atomic_read(&vmci_num_guest_devices) != 0;
+}
+
+u32 vmci_get_vm_context_id(void)
+{
+	if (vm_context_id == VMCI_INVALID_ID) {
+		struct vmci_datagram get_cid_msg;
+		get_cid_msg.dst =
+		    vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+				     VMCI_GET_CONTEXT_ID);
+		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
+		get_cid_msg.payload_size = 0;
+		vm_context_id = vmci_send_datagram(&get_cid_msg);
+	}
+	return vm_context_id;
+}
+
+/*
+ * VM to hypervisor call mechanism. We use the standard VMware naming
+ * convention since shared code is calling this function as well.
+ */
+int vmci_send_datagram(struct vmci_datagram *dg)
+{
+	unsigned long flags;
+	int result;
+
+	/* Check args. */
+	if (dg == NULL)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * Need to acquire spinlock on the device because the datagram
+	 * data may be spread over multiple pages and the monitor may
+	 * interleave device user rpc calls from multiple
+	 * VCPUs. Acquiring the spinlock precludes that
+	 * possibility. Disabling interrupts to avoid incoming
+	 * datagrams during a "rep out" and possibly landing up in
+	 * this function.
+	 */
+	spin_lock_irqsave(&vmci_dev_spinlock, flags);
+
+	if (vmci_dev_g) {
+		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
+			     dg, VMCI_DG_SIZE(dg));
+		result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+	} else {
+		result = VMCI_ERROR_UNAVAILABLE;
+	}
+
+	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_send_datagram);
+
+/*
+ * Gets called with the new context ID when it changes, e.g. when the VM
+ * is updated or resumed.
+ */
+static void vmci_guest_cid_update(u32 sub_id,
+				  const struct vmci_event_data *event_data,
+				  void *client_data)
+{
+	const struct vmci_event_payld_ctx *ev_payload =
+				vmci_event_data_const_payload(event_data);
+
+	if (sub_id != ctx_update_sub_id) {
+		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
+		return;
+	}
+
+	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
+		pr_devel("Invalid event data\n");
+		return;
+	}
+
+	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
+		 vm_context_id, ev_payload->context_id, event_data->event);
+
+	vm_context_id = ev_payload->context_id;
+}
+
+/*
+ * Verify that the host supports the hypercalls we need. If it does not,
+ * try to find fallback hypercalls and use those instead.  Returns
+ * true if required hypercalls (or fallback hypercalls) are
+ * supported by the host, false otherwise.
+ */
+static bool vmci_check_host_caps(struct pci_dev *pdev)
+{
+	bool result;
+	struct vmci_resource_query_msg *msg;
+	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
+				VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
+	struct vmci_datagram *check_msg;
+
+	check_msg = kmalloc(msg_size, GFP_KERNEL);
+	if (!check_msg) {
+		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
+		return false;
+	}
+
+	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					  VMCI_RESOURCES_QUERY);
+	check_msg->src = VMCI_ANON_SRC_HANDLE;
+	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
+	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);
+
+	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
+	msg->resources[0] = VMCI_GET_CONTEXT_ID;
+
+	/* Check that hypercalls are supported */
+	result = vmci_send_datagram(check_msg) == 0x01;
+	kfree(check_msg);
+
+	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
+		__func__, result ? "PASSED" : "FAILED");
+
+	/* We need the vector. There are no fallbacks. */
+	return result;
+}
+
+/*
+ * Reads datagrams from the data in port and dispatches them. We
+ * always start reading datagrams into only the first page of the
+ * datagram buffer. If the datagrams don't fit into one page, we
+ * use the maximum datagram buffer size for the remainder of the
+ * invocation. This is a simple heuristic for not penalizing
+ * small datagrams.
+ *
+ * This function assumes that it has exclusive access to the data
+ * in port for the duration of the call.
+ */
+static void vmci_dispatch_dgs(unsigned long data)
+{
+	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
+	u8 *dg_in_buffer = vmci_dev->data_buffer;
+	struct vmci_datagram *dg;
+	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
+	size_t current_dg_in_buffer_size = PAGE_SIZE;
+	size_t remaining_bytes;
+
+	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);
+
+	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+		    vmci_dev->data_buffer, current_dg_in_buffer_size);
+	dg = (struct vmci_datagram *)dg_in_buffer;
+	remaining_bytes = current_dg_in_buffer_size;
+
+	while (dg->dst.resource != VMCI_INVALID_ID ||
+	       remaining_bytes > PAGE_SIZE) {
+		unsigned dg_in_size;
+
+		/*
+		 * When the input buffer spans multiple pages, a datagram can
+		 * start on any page boundary in the buffer.
+		 */
+		if (dg->dst.resource == VMCI_INVALID_ID) {
+			dg = (struct vmci_datagram *)roundup(
+				(uintptr_t)dg + 1, PAGE_SIZE);
+			remaining_bytes =
+				(size_t)(dg_in_buffer +
+					 current_dg_in_buffer_size -
+					 (u8 *)dg);
+			continue;
+		}
+
+		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);
+
+		if (dg_in_size <= dg_in_buffer_size) {
+			int result;
+
+			/*
+			 * If the remaining bytes in the datagram
+			 * buffer don't contain the complete
+			 * datagram, we first make sure we have enough
+			 * room for it and then we read the remainder
+			 * of the datagram and possibly any following
+			 * datagrams.
+			 */
+			if (dg_in_size > remaining_bytes) {
+				if (remaining_bytes !=
+				    current_dg_in_buffer_size) {
+
+					/*
+					 * We move the partial
+					 * datagram to the front and
+					 * read the remainder of the
+					 * datagram and possibly
+					 * following calls into the
+					 * following bytes.
+					 */
+					memmove(dg_in_buffer, dg_in_buffer +
+						current_dg_in_buffer_size -
+						remaining_bytes,
+						remaining_bytes);
+					dg = (struct vmci_datagram *)
+					    dg_in_buffer;
+				}
+
+				if (current_dg_in_buffer_size !=
+				    dg_in_buffer_size)
+					current_dg_in_buffer_size =
+					    dg_in_buffer_size;
+
+				ioread8_rep(vmci_dev->iobase +
+						VMCI_DATA_IN_ADDR,
+					vmci_dev->data_buffer +
+						remaining_bytes,
+					current_dg_in_buffer_size -
+						remaining_bytes);
+			}
+
+			/*
+			 * We special case event datagrams from the
+			 * hypervisor.
+			 */
+			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
+			    dg->dst.resource == VMCI_EVENT_HANDLER) {
+				result = vmci_event_dispatch(dg);
+			} else {
+				result = vmci_datagram_invoke_guest_handler(dg);
+			}
+			if (result < VMCI_SUCCESS)
+				dev_dbg(vmci_dev->dev,
+					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
+					 dg->dst.resource, result);
+
+			/* On to the next datagram. */
+			dg = (struct vmci_datagram *)((u8 *)dg +
+						      dg_in_size);
+		} else {
+			size_t bytes_to_skip;
+
+			/*
+			 * Datagram doesn't fit in datagram buffer of maximal
+			 * size. We drop it.
+			 */
+			dev_dbg(vmci_dev->dev,
+				"Failed to receive datagram (size=%u bytes)\n",
+				 dg_in_size);
+
+			bytes_to_skip = dg_in_size - remaining_bytes;
+			if (current_dg_in_buffer_size != dg_in_buffer_size)
+				current_dg_in_buffer_size = dg_in_buffer_size;
+
+			for (;;) {
+				ioread8_rep(vmci_dev->iobase +
+						VMCI_DATA_IN_ADDR,
+					vmci_dev->data_buffer,
+					current_dg_in_buffer_size);
+				if (bytes_to_skip <= current_dg_in_buffer_size)
+					break;
+
+				bytes_to_skip -= current_dg_in_buffer_size;
+			}
+			dg = (struct vmci_datagram *)(dg_in_buffer +
+						      bytes_to_skip);
+		}
+
+		remaining_bytes =
+		    (size_t) (dg_in_buffer + current_dg_in_buffer_size -
+			      (u8 *)dg);
+
+		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
+			/* Get the next batch of datagrams. */
+
+			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
+				    vmci_dev->data_buffer,
+				    current_dg_in_buffer_size);
+			dg = (struct vmci_datagram *)dg_in_buffer;
+			remaining_bytes = current_dg_in_buffer_size;
+		}
+	}
+}
+
+/*
+ * Scans the notification bitmap for raised flags, clears them
+ * and handles the notifications.
+ */
+static void vmci_process_bitmap(unsigned long data)
+{
+	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
+
+	if (!dev->notification_bitmap) {
+		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
+		return;
+	}
+
+	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
+}
+
+/*
+ * Enable MSI-X.  Try exclusive vectors first, then shared vectors.
+ */
+static int vmci_enable_msix(struct pci_dev *pdev,
+			    struct vmci_guest_device *vmci_dev)
+{
+	int i;
+	int result;
+
+	for (i = 0; i < VMCI_MAX_INTRS; ++i) {
+		vmci_dev->msix_entries[i].entry = i;
+		vmci_dev->msix_entries[i].vector = i;
+	}
+
+	result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
+	if (result == 0)
+		vmci_dev->exclusive_vectors = true;
+	else if (result > 0)
+		result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);
+
+	return result;
+}
+
+/*
+ * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
+ * interrupt (vector VMCI_INTR_DATAGRAM).
+ */
+static irqreturn_t vmci_interrupt(int irq, void *_dev)
+{
+	struct vmci_guest_device *dev = _dev;
+
+	/*
+	 * If we are using MSI-X with exclusive vectors then we simply schedule
+	 * the datagram tasklet, since we know the interrupt was meant for us.
+	 * Otherwise we must read the ICR to determine what to do.
+	 */
+
+	if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
+		tasklet_schedule(&dev->datagram_tasklet);
+	} else {
+		unsigned int icr;
+
+		/* Acknowledge interrupt and determine what needs doing. */
+		icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
+		if (icr == 0 || icr == ~0)
+			return IRQ_NONE;
+
+		if (icr & VMCI_ICR_DATAGRAM) {
+			tasklet_schedule(&dev->datagram_tasklet);
+			icr &= ~VMCI_ICR_DATAGRAM;
+		}
+
+		if (icr & VMCI_ICR_NOTIFICATION) {
+			tasklet_schedule(&dev->bm_tasklet);
+			icr &= ~VMCI_ICR_NOTIFICATION;
+		}
+
+		if (icr != 0)
+			dev_warn(dev->dev,
+				 "Ignoring unknown interrupt cause (%d)\n",
+				 icr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
+ * which is for the notification bitmap.  Will only get called if we are
+ * using MSI-X with exclusive vectors.
+ */
+static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
+{
+	struct vmci_guest_device *dev = _dev;
+
+	/* For MSI-X we can just assume it was meant for us. */
+	tasklet_schedule(&dev->bm_tasklet);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Most of the initialization at module load time is done here.
+ */
+static int vmci_guest_probe_device(struct pci_dev *pdev,
+				   const struct pci_device_id *id)
+{
+	struct vmci_guest_device *vmci_dev;
+	void __iomem *iobase;
+	unsigned int capabilities;
+	unsigned long cmd;
+	int vmci_err;
+	int error;
+
+	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");
+
+	error = pcim_enable_device(pdev);
+	if (error) {
+		dev_err(&pdev->dev,
+			"Failed to enable VMCI device: %d\n", error);
+		return error;
+	}
+
+	error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
+	if (error) {
+		dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
+		return error;
+	}
+
+	iobase = pcim_iomap_table(pdev)[0];
+
+	dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
+		 (unsigned long)iobase, pdev->irq);
+
+	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
+	if (!vmci_dev) {
+		dev_err(&pdev->dev,
+			"Can't allocate memory for VMCI device\n");
+		return -ENOMEM;
+	}
+
+	vmci_dev->dev = &pdev->dev;
+	vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+	vmci_dev->exclusive_vectors = false;
+	vmci_dev->iobase = iobase;
+
+	tasklet_init(&vmci_dev->datagram_tasklet,
+		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
+	tasklet_init(&vmci_dev->bm_tasklet,
+		     vmci_process_bitmap, (unsigned long)vmci_dev);
+
+	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
+	if (!vmci_dev->data_buffer) {
+		dev_err(&pdev->dev,
+			"Can't allocate memory for datagram buffer\n");
+		return -ENOMEM;
+	}
+
+	pci_set_master(pdev);	/* To enable queue_pair functionality. */
+
+	/*
+	 * Verify that the VMCI Device supports the capabilities that
+	 * we need. If the device is missing capabilities that we would
+	 * like to use, check for fallback capabilities and use those
+	 * instead (so we can run a new VM on old hosts). Fail the load if
+	 * a required capability is missing and there is no fallback.
+	 *
+	 * Right now, we need datagrams. There are no fallbacks.
+	 */
+	capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
+	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
+		dev_err(&pdev->dev, "Device does not support datagrams\n");
+		error = -ENXIO;
+		goto err_free_data_buffer;
+	}
+
+	/*
+	 * If the hardware supports notifications, we will use that as
+	 * well.
+	 */
+	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+		vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+		if (!vmci_dev->notification_bitmap) {
+			dev_warn(&pdev->dev,
+				 "Unable to allocate notification bitmap\n");
+		} else {
+			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
+			capabilities |= VMCI_CAPS_NOTIFICATIONS;
+		}
+	}
+
+	dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);
+
+	/* Let the host know which capabilities we intend to use. */
+	iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);
+
+	/* Set up global device so that we can start sending datagrams */
+	spin_lock_irq(&vmci_dev_spinlock);
+	vmci_dev_g = vmci_dev;
+	spin_unlock_irq(&vmci_dev_spinlock);
+
+	/*
+	 * Register notification bitmap with device if that capability is
+	 * used.
+	 */
+	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
+		struct page *page =
+			vmalloc_to_page(vmci_dev->notification_bitmap);
+		unsigned long bitmap_ppn = page_to_pfn(page);
+		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
+			dev_warn(&pdev->dev,
+				 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
+				 (u32) bitmap_ppn);
+			goto err_remove_vmci_dev_g;
+		}
+	}
+
+	/* Check host capabilities. */
+	if (!vmci_check_host_caps(pdev))
+		goto err_remove_bitmap;
+
+	/* Enable device. */
+
+	/*
+	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
+	 * update the internal context id when needed.
+	 */
+	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
+					vmci_guest_cid_update, NULL,
+					&ctx_update_sub_id);
+	if (vmci_err < VMCI_SUCCESS)
+		dev_warn(&pdev->dev,
+			 "Failed to subscribe to event (type=%d): %d\n",
+			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
+
+	/*
+	 * Enable interrupts.  Try MSI-X first, then MSI, and then fallback on
+	 * legacy interrupts.
+	 */
+	if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
+		vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
+		vmci_dev->irq = vmci_dev->msix_entries[0].vector;
+	} else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
+		vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
+		vmci_dev->irq = pdev->irq;
+	} else {
+		vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
+		vmci_dev->irq = pdev->irq;
+	}
+
+	/*
+	 * Request IRQ for legacy or MSI interrupts, or for first
+	 * MSI-X vector.
+	 */
+	error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
+			    KBUILD_MODNAME, vmci_dev);
+	if (error) {
+		dev_err(&pdev->dev, "Irq %u in use: %d\n",
+			vmci_dev->irq, error);
+		goto err_disable_msi;
+	}
+
+	/*
+	 * For MSI-X with exclusive vectors we need to request an
+	 * interrupt for each vector so that we get a separate
+	 * interrupt handler routine.  This allows us to distinguish
+	 * between the vectors.
+	 */
+	if (vmci_dev->exclusive_vectors) {
+		error = request_irq(vmci_dev->msix_entries[1].vector,
+				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
+				    vmci_dev);
+		if (error) {
+			dev_err(&pdev->dev,
+				"Failed to allocate irq %u: %d\n",
+				vmci_dev->msix_entries[1].vector, error);
+			goto err_free_irq;
+		}
+	}
+
+	dev_dbg(&pdev->dev, "Registered device\n");
+
+	atomic_inc(&vmci_num_guest_devices);
+
+	/* Enable specific interrupt bits. */
+	cmd = VMCI_IMR_DATAGRAM;
+	if (capabilities & VMCI_CAPS_NOTIFICATIONS)
+		cmd |= VMCI_IMR_NOTIFICATION;
+	iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
+
+	/* Enable interrupts. */
+	iowrite32(VMCI_CONTROL_INT_ENABLE,
+		  vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+	pci_set_drvdata(pdev, vmci_dev);
+	return 0;
+
+err_free_irq:
+	free_irq(vmci_dev->irq, &vmci_dev);
+	tasklet_kill(&vmci_dev->datagram_tasklet);
+	tasklet_kill(&vmci_dev->bm_tasklet);
+
+err_disable_msi:
+	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
+		pci_disable_msix(pdev);
+	else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
+		pci_disable_msi(pdev);
+
+	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+	if (vmci_err < VMCI_SUCCESS)
+		dev_warn(&pdev->dev,
+			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+err_remove_bitmap:
+	if (vmci_dev->notification_bitmap) {
+		iowrite32(VMCI_CONTROL_RESET,
+			  vmci_dev->iobase + VMCI_CONTROL_ADDR);
+		vfree(vmci_dev->notification_bitmap);
+	}
+
+err_remove_vmci_dev_g:
+	spin_lock_irq(&vmci_dev_spinlock);
+	vmci_dev_g = NULL;
+	spin_unlock_irq(&vmci_dev_spinlock);
+
+err_free_data_buffer:
+	vfree(vmci_dev->data_buffer);
+
+	/* The rest are managed resources and will be freed by PCI core */
+	return error;
+}
+
+static void vmci_guest_remove_device(struct pci_dev *pdev)
+{
+	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
+	int vmci_err;
+
+	dev_dbg(&pdev->dev, "Removing device\n");
+
+	atomic_dec(&vmci_num_guest_devices);
+
+	vmci_qp_guest_endpoints_exit();
+
+	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
+	if (vmci_err < VMCI_SUCCESS)
+		dev_warn(&pdev->dev,
+			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
+			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);
+
+	spin_lock_irq(&vmci_dev_spinlock);
+	vmci_dev_g = NULL;
+	spin_unlock_irq(&vmci_dev_spinlock);
+
+	dev_dbg(&pdev->dev, "Resetting vmci device\n");
+	iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
+
+	/*
+	 * Free IRQ and then disable MSI/MSI-X as appropriate.  For
+	 * MSI-X, we might have multiple vectors, each with their own
+	 * IRQ, which we must free too.
+	 */
+	free_irq(vmci_dev->irq, vmci_dev);
+	if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
+		if (vmci_dev->exclusive_vectors)
+			free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
+		pci_disable_msix(pdev);
+	} else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
+		pci_disable_msi(pdev);
+	}
+
+	tasklet_kill(&vmci_dev->datagram_tasklet);
+	tasklet_kill(&vmci_dev->bm_tasklet);
+
+	if (vmci_dev->notification_bitmap) {
+		/*
+		 * The device reset above cleared the bitmap state of the
+		 * device, so we can safely free it here.
+		 */
+
+		vfree(vmci_dev->notification_bitmap);
+	}
+
+	vfree(vmci_dev->data_buffer);
+
+	/* The rest are managed resources and will be freed by PCI core */
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
+	{ 0 },
+};
+MODULE_DEVICE_TABLE(pci, vmci_ids);
+
+static struct pci_driver vmci_guest_driver = {
+	.name		= KBUILD_MODNAME,
+	.id_table	= vmci_ids,
+	.probe		= vmci_guest_probe_device,
+	.remove		= vmci_guest_remove_device,
+};
+
+int __init vmci_guest_init(void)
+{
+	return pci_register_driver(&vmci_guest_driver);
+}
+
+void __exit vmci_guest_exit(void)
+{
+	pci_unregister_driver(&vmci_guest_driver);
+}
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c
new file mode 100644
index 0000000..344973a
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.c
@@ -0,0 +1,142 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/slab.h>
+#include "vmci_handle_array.h"
+
+static size_t handle_arr_calc_size(size_t capacity)
+{
+	return sizeof(struct vmci_handle_arr) +
+	    capacity * sizeof(struct vmci_handle);
+}
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity)
+{
+	struct vmci_handle_arr *array;
+
+	if (capacity == 0)
+		capacity = VMCI_HANDLE_ARRAY_DEFAULT_SIZE;
+
+	array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC);
+	if (!array)
+		return NULL;
+
+	array->capacity = capacity;
+	array->size = 0;
+
+	return array;
+}
+
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array)
+{
+	kfree(array);
+}
+
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+				  struct vmci_handle handle)
+{
+	struct vmci_handle_arr *array = *array_ptr;
+
+	if (unlikely(array->size >= array->capacity)) {
+		/* reallocate. */
+		struct vmci_handle_arr *new_array;
+		size_t new_capacity = array->capacity * VMCI_ARR_CAP_MULT;
+		size_t new_size = handle_arr_calc_size(new_capacity);
+
+		new_array = krealloc(array, new_size, GFP_ATOMIC);
+		if (!new_array)
+			return;
+
+		new_array->capacity = new_capacity;
+		*array_ptr = array = new_array;
+	}
+
+	array->entries[array->size] = handle;
+	array->size++;
+}
+
+/*
+ * Returns the handle that was removed, or VMCI_INVALID_HANDLE if the
+ * entry was not found.
+ */
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+						struct vmci_handle entry_handle)
+{
+	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+	size_t i;
+
+	for (i = 0; i < array->size; i++) {
+		if (vmci_handle_is_equal(array->entries[i], entry_handle)) {
+			handle = array->entries[i];
+			array->size--;
+			array->entries[i] = array->entries[array->size];
+			array->entries[array->size] = VMCI_INVALID_HANDLE;
+			break;
+		}
+	}
+
+	return handle;
+}
+
+/*
+ * Returns the handle that was removed, or VMCI_INVALID_HANDLE if the
+ * array was empty.
+ */
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array)
+{
+	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+
+	if (array->size) {
+		array->size--;
+		handle = array->entries[array->size];
+		array->entries[array->size] = VMCI_INVALID_HANDLE;
+	}
+
+	return handle;
+}
+
+/*
+ * Returns the handle at the given index, or VMCI_INVALID_HANDLE if the
+ * index is out of range.
+ */
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index)
+{
+	if (unlikely(index >= array->size))
+		return VMCI_INVALID_HANDLE;
+
+	return array->entries[index];
+}
+
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+			       struct vmci_handle entry_handle)
+{
+	size_t i;
+
+	for (i = 0; i < array->size; i++)
+		if (vmci_handle_is_equal(array->entries[i], entry_handle))
+			return true;
+
+	return false;
+}
+
+/*
+ * Returns NULL if the array is empty; otherwise, a pointer to the
+ * underlying array of VMCI handles.
+ */
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array)
+{
+	if (array->size)
+		return array->entries;
+
+	return NULL;
+}
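A brief usage sketch (illustrative only, not part of the patch) for the helpers above: vmci_handle_arr_append_entry() takes a pointer to the array pointer because it may krealloc() and move the array, and it silently drops the new entry if that reallocation fails.

/* Sketch only: typical handle array usage. */
static void vmci_handle_arr_example(void)
{
	struct vmci_handle_arr *arr;
	struct vmci_handle h = { .context = 1, .resource = 2 };

	arr = vmci_handle_arr_create(0);	/* 0 selects the default capacity */
	if (!arr)
		return;

	vmci_handle_arr_append_entry(&arr, h);	/* may reallocate and move arr */

	if (vmci_handle_arr_has_entry(arr, h))
		vmci_handle_arr_remove_entry(arr, h);

	vmci_handle_arr_destroy(arr);
}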
diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h
new file mode 100644
index 0000000..b5f3a7f
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_handle_array.h
@@ -0,0 +1,52 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_HANDLE_ARRAY_H_
+#define _VMCI_HANDLE_ARRAY_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#define VMCI_HANDLE_ARRAY_DEFAULT_SIZE 4
+#define VMCI_ARR_CAP_MULT 2	/* Array capacity multiplier */
+
+struct vmci_handle_arr {
+	size_t capacity;
+	size_t size;
+	struct vmci_handle entries[];
+};
+
+struct vmci_handle_arr *vmci_handle_arr_create(size_t capacity);
+void vmci_handle_arr_destroy(struct vmci_handle_arr *array);
+void vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr,
+				  struct vmci_handle handle);
+struct vmci_handle vmci_handle_arr_remove_entry(struct vmci_handle_arr *array,
+						struct vmci_handle
+						entry_handle);
+struct vmci_handle vmci_handle_arr_remove_tail(struct vmci_handle_arr *array);
+struct vmci_handle
+vmci_handle_arr_get_entry(const struct vmci_handle_arr *array, size_t index);
+bool vmci_handle_arr_has_entry(const struct vmci_handle_arr *array,
+			       struct vmci_handle entry_handle);
+struct vmci_handle *vmci_handle_arr_get_handles(struct vmci_handle_arr *array);
+
+static inline size_t vmci_handle_arr_get_size(
+	const struct vmci_handle_arr *array)
+{
+	return array->size;
+}
+
+
+#endif /* _VMCI_HANDLE_ARRAY_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
new file mode 100644
index 0000000..d4722b3
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -0,0 +1,1043 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/moduleparam.h>
+#include <linux/miscdevice.h>
+#include <linux/interrupt.h>
+#include <linux/highmem.h>
+#include <linux/atomic.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/pci.h>
+#include <linux/smp.h>
+#include <linux/fs.h>
+#include <linux/io.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_doorbell.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+
+#define VMCI_UTIL_NUM_RESOURCES 1
+
+enum {
+	VMCI_NOTIFY_RESOURCE_QUEUE_PAIR = 0,
+	VMCI_NOTIFY_RESOURCE_DOOR_BELL = 1,
+};
+
+enum {
+	VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY = 0,
+	VMCI_NOTIFY_RESOURCE_ACTION_CREATE = 1,
+	VMCI_NOTIFY_RESOURCE_ACTION_DESTROY = 2,
+};
+
+/*
+ * VMCI driver initialization. This block can also be used to
+ * pass initial group membership etc.
+ */
+struct vmci_init_blk {
+	u32 cid;
+	u32 flags;
+};
+
+/* VMCIqueue_pairAllocInfo_VMToVM */
+struct vmci_qp_alloc_info_vmvm {
+	struct vmci_handle handle;
+	u32 peer;
+	u32 flags;
+	u64 produce_size;
+	u64 consume_size;
+	u64 produce_page_file;	  /* User VA. */
+	u64 consume_page_file;	  /* User VA. */
+	u64 produce_page_file_size;  /* Size of the file name array. */
+	u64 consume_page_file_size;  /* Size of the file name array. */
+	s32 result;
+	u32 _pad;
+};
+
+/* VMCISetNotifyInfo: Used to pass notify flag's address to the host driver. */
+struct vmci_set_notify_info {
+	u64 notify_uva;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * Per-instance host state
+ */
+struct vmci_host_dev {
+	struct vmci_ctx *context;
+	int user_version;
+	enum vmci_obj_type ct_type;
+	struct mutex lock;  /* Mutex lock for vmci context access */
+};
+
+static struct vmci_ctx *host_context;
+static bool vmci_host_device_initialized;
+static atomic_t vmci_host_active_users = ATOMIC_INIT(0);
+
+/*
+ * Determines whether the VMCI host personality is
+ * available. Since the core functionality of the host driver is
+ * always present, all guests could possibly use the host
+ * personality. However, to minimize the deviation from the
+ * pre-unified driver state of affairs, we only consider the host
+ * device active if there is no active guest device or if there
+ * are VMX'en with active VMCI contexts using the host device.
+ */
+bool vmci_host_code_active(void)
+{
+	return vmci_host_device_initialized &&
+	    (!vmci_guest_code_active() ||
+	     atomic_read(&vmci_host_active_users) > 0);
+}
+
+/*
+ * Called on open of /dev/vmci.
+ */
+static int vmci_host_open(struct inode *inode, struct file *filp)
+{
+	struct vmci_host_dev *vmci_host_dev;
+
+	vmci_host_dev = kzalloc(sizeof(struct vmci_host_dev), GFP_KERNEL);
+	if (vmci_host_dev == NULL)
+		return -ENOMEM;
+
+	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+	mutex_init(&vmci_host_dev->lock);
+	filp->private_data = vmci_host_dev;
+
+	return 0;
+}
+
+/*
+ * Called on close of /dev/vmci, most often when the process
+ * exits.
+ */
+static int vmci_host_close(struct inode *inode, struct file *filp)
+{
+	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+
+	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+		vmci_ctx_destroy(vmci_host_dev->context);
+		vmci_host_dev->context = NULL;
+
+		/*
+		 * The number of active contexts is used to track whether any
+		 * VMX'en are using the host personality. It is incremented when
+		 * a context is created through the IOCTL_VMCI_INIT_CONTEXT
+		 * ioctl.
+		 */
+		atomic_dec(&vmci_host_active_users);
+	}
+	vmci_host_dev->ct_type = VMCIOBJ_NOT_SET;
+
+	kfree(vmci_host_dev);
+	filp->private_data = NULL;
+	return 0;
+}
+
+/*
+ * This is used to wake up the VMX when a VMCI call arrives, or
+ * to wake up select() or poll() at the next clock tick.
+ */
+static unsigned int vmci_host_poll(struct file *filp, poll_table *wait)
+{
+	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+	struct vmci_ctx *context = vmci_host_dev->context;
+	unsigned int mask = 0;
+
+	if (vmci_host_dev->ct_type == VMCIOBJ_CONTEXT) {
+		/* Check for VMCI calls to this VM context. */
+		if (wait)
+			poll_wait(filp, &context->host_context.wait_queue,
+				  wait);
+
+		spin_lock(&context->lock);
+		if (context->pending_datagrams > 0 ||
+		    vmci_handle_arr_get_size(
+				context->pending_doorbell_array) > 0) {
+			mask = POLLIN;
+		}
+		spin_unlock(&context->lock);
+	}
+	return mask;
+}
+
+/*
+ * Copies the handles of a handle array into a user buffer, and
+ * returns the new length in user_buf_size. If the copy to the
+ * user buffer fails, the function still returns VMCI_SUCCESS,
+ * but retval != 0.
+ */
+static int drv_cp_harray_to_user(void __user *user_buf_uva,
+				 u64 *user_buf_size,
+				 struct vmci_handle_arr *handle_array,
+				 int *retval)
+{
+	u32 array_size = 0;
+	struct vmci_handle *handles;
+
+	if (handle_array)
+		array_size = vmci_handle_arr_get_size(handle_array);
+
+	if (array_size * sizeof(*handles) > *user_buf_size)
+		return VMCI_ERROR_MORE_DATA;
+
+	*user_buf_size = array_size * sizeof(*handles);
+	if (*user_buf_size)
+		*retval = copy_to_user(user_buf_uva,
+				       vmci_handle_arr_get_handles
+				       (handle_array), *user_buf_size);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Sets up a given context for notify to work.  Maps the notify
+ * boolean in user VA into kernel space.
+ */
+static int vmci_host_setup_notify(struct vmci_ctx *context,
+				  unsigned long uva)
+{
+	struct page *page;
+	int retval;
+
+	if (context->notify_page) {
+		pr_devel("%s: Notify mechanism is already set up\n", __func__);
+		return VMCI_ERROR_DUPLICATE_ENTRY;
+	}
+
+	/*
+	 * We are using 'bool' internally, but let's make sure we are
+	 * explicit about the size.
+	 */
+	BUILD_BUG_ON(sizeof(bool) != sizeof(u8));
+	if (!access_ok(VERIFY_WRITE, (void __user *)uva, sizeof(u8)))
+		return VMCI_ERROR_GENERIC;
+
+	/*
+	 * Lock physical page backing a given user VA.
+	 */
+	down_read(&current->mm->mmap_sem);
+	retval = get_user_pages(current, current->mm,
+				PAGE_ALIGN(uva),
+				1, 1, 0, &page, NULL);
+	up_read(&current->mm->mmap_sem);
+	if (retval != 1)
+		return VMCI_ERROR_GENERIC;
+
+	/*
+	 * Map the locked page and set up notify pointer.
+	 */
+	context->notify = kmap(page) + (uva & (PAGE_SIZE - 1));
+	vmci_ctx_check_signal_notify(context);
+
+	return VMCI_SUCCESS;
+}
+
+static int vmci_host_get_version(struct vmci_host_dev *vmci_host_dev,
+				 unsigned int cmd, void __user *uptr)
+{
+	if (cmd == IOCTL_VMCI_VERSION2) {
+		int __user *vptr = uptr;
+		if (get_user(vmci_host_dev->user_version, vptr))
+			return -EFAULT;
+	}
+
+	/*
+	 * The basic logic here is:
+	 *
+	 * If the user sends in a version of 0 tell it our version.
+	 * If the user didn't send in a version, tell it our version.
+	 * If the user sent in an old version, tell it -its- version.
+	 * If the user sent in a newer version, tell it our version.
+	 *
+	 * The rationale behind telling the caller its version is that
+	 * Workstation 6.5 required that the VMX and the VMCI kernel module
+	 * be version sync'd.  All new VMX users will be programmed to
+	 * handle the VMCI kernel module version.
+	 */
+
+	if (vmci_host_dev->user_version > 0 &&
+	    vmci_host_dev->user_version < VMCI_VERSION_HOSTQP) {
+		return vmci_host_dev->user_version;
+	}
+
+	return VMCI_VERSION;
+}
+
+#define vmci_ioctl_err(fmt, ...)	\
+	pr_devel("%s: " fmt, ioctl_name, ##__VA_ARGS__)
+
+static int vmci_host_do_init_context(struct vmci_host_dev *vmci_host_dev,
+				     const char *ioctl_name,
+				     void __user *uptr)
+{
+	struct vmci_init_blk init_block;
+	const struct cred *cred;
+	int retval;
+
+	if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
+		vmci_ioctl_err("error reading init block\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&vmci_host_dev->lock);
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_NOT_SET) {
+		vmci_ioctl_err("received VMCI init on initialized handle\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
+		vmci_ioctl_err("unsupported VMCI restriction flag\n");
+		retval = -EINVAL;
+		goto out;
+	}
+
+	cred = get_current_cred();
+	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
+						 init_block.flags, 0,
+						 vmci_host_dev->user_version,
+						 cred);
+	put_cred(cred);
+	if (IS_ERR(vmci_host_dev->context)) {
+		retval = PTR_ERR(vmci_host_dev->context);
+		vmci_ioctl_err("error initializing context\n");
+		goto out;
+	}
+
+	/*
+	 * Copy cid back to userlevel; we do this to allow the VMX
+	 * to enforce its policy on cid generation.
+	 */
+	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
+	if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
+		vmci_ctx_destroy(vmci_host_dev->context);
+		vmci_host_dev->context = NULL;
+		vmci_ioctl_err("error writing init block\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	vmci_host_dev->ct_type = VMCIOBJ_CONTEXT;
+	atomic_inc(&vmci_host_active_users);
+
+	retval = 0;
+
+out:
+	mutex_unlock(&vmci_host_dev->lock);
+	return retval;
+}
+
+static int vmci_host_do_send_datagram(struct vmci_host_dev *vmci_host_dev,
+				      const char *ioctl_name,
+				      void __user *uptr)
+{
+	struct vmci_datagram_snd_rcv_info send_info;
+	struct vmci_datagram *dg = NULL;
+	u32 cid;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&send_info, uptr, sizeof(send_info)))
+		return -EFAULT;
+
+	if (send_info.len > VMCI_MAX_DG_SIZE) {
+		vmci_ioctl_err("datagram is too big (size=%d)\n",
+			       send_info.len);
+		return -EINVAL;
+	}
+
+	if (send_info.len < sizeof(*dg)) {
+		vmci_ioctl_err("datagram is too small (size=%d)\n",
+			       send_info.len);
+		return -EINVAL;
+	}
+
+	dg = kmalloc(send_info.len, GFP_KERNEL);
+	if (!dg) {
+		vmci_ioctl_err(
+			"cannot allocate memory to dispatch datagram\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(dg, (void __user *)(uintptr_t)send_info.addr,
+			   send_info.len)) {
+		vmci_ioctl_err("error getting datagram\n");
+		kfree(dg);
+		return -EFAULT;
+	}
+
+	pr_devel("Datagram dst (handle=0x%x:0x%x) src (handle=0x%x:0x%x), payload (size=%llu bytes)\n",
+		 dg->dst.context, dg->dst.resource,
+		 dg->src.context, dg->src.resource,
+		 (unsigned long long)dg->payload_size);
+
+	/* Get source context id. */
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+	send_info.result = vmci_datagram_dispatch(cid, dg, true);
+	kfree(dg);
+
+	return copy_to_user(uptr, &send_info, sizeof(send_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_receive_datagram(struct vmci_host_dev *vmci_host_dev,
+					 const char *ioctl_name,
+					 void __user *uptr)
+{
+	struct vmci_datagram_snd_rcv_info recv_info;
+	struct vmci_datagram *dg = NULL;
+	int retval;
+	size_t size;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&recv_info, uptr, sizeof(recv_info)))
+		return -EFAULT;
+
+	size = recv_info.len;
+	recv_info.result = vmci_ctx_dequeue_datagram(vmci_host_dev->context,
+						     &size, &dg);
+
+	if (recv_info.result >= VMCI_SUCCESS) {
+		void __user *ubuf = (void __user *)(uintptr_t)recv_info.addr;
+		retval = copy_to_user(ubuf, dg, VMCI_DG_SIZE(dg));
+		kfree(dg);
+		if (retval != 0)
+			return -EFAULT;
+	}
+
+	return copy_to_user(uptr, &recv_info, sizeof(recv_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
+					const char *ioctl_name,
+					void __user *uptr)
+{
+	struct vmci_handle handle;
+	int vmci_status;
+	int __user *retptr;
+	u32 cid;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+		struct vmci_qp_alloc_info_vmvm alloc_info;
+		struct vmci_qp_alloc_info_vmvm __user *info = uptr;
+
+		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+			return -EFAULT;
+
+		handle = alloc_info.handle;
+		retptr = &info->result;
+
+		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+						alloc_info.peer,
+						alloc_info.flags,
+						VMCI_NO_PRIVILEGE_FLAGS,
+						alloc_info.produce_size,
+						alloc_info.consume_size,
+						NULL,
+						vmci_host_dev->context);
+
+		if (vmci_status == VMCI_SUCCESS)
+			vmci_status = VMCI_SUCCESS_QUEUEPAIR_CREATE;
+	} else {
+		struct vmci_qp_alloc_info alloc_info;
+		struct vmci_qp_alloc_info __user *info = uptr;
+		struct vmci_qp_page_store page_store;
+
+		if (copy_from_user(&alloc_info, uptr, sizeof(alloc_info)))
+			return -EFAULT;
+
+		handle = alloc_info.handle;
+		retptr = &info->result;
+
+		page_store.pages = alloc_info.ppn_va;
+		page_store.len = alloc_info.num_ppns;
+
+		vmci_status = vmci_qp_broker_alloc(alloc_info.handle,
+						alloc_info.peer,
+						alloc_info.flags,
+						VMCI_NO_PRIVILEGE_FLAGS,
+						alloc_info.produce_size,
+						alloc_info.consume_size,
+						&page_store,
+						vmci_host_dev->context);
+	}
+
+	if (put_user(vmci_status, retptr)) {
+		if (vmci_status >= VMCI_SUCCESS) {
+			vmci_status = vmci_qp_broker_detach(handle,
+							vmci_host_dev->context);
+		}
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int vmci_host_do_queuepair_setva(struct vmci_host_dev *vmci_host_dev,
+					const char *ioctl_name,
+					void __user *uptr)
+{
+	struct vmci_qp_set_va_info set_va_info;
+	struct vmci_qp_set_va_info __user *info = uptr;
+	s32 result;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+		vmci_ioctl_err("is not allowed\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&set_va_info, uptr, sizeof(set_va_info)))
+		return -EFAULT;
+
+	if (set_va_info.va) {
+		/*
+		 * VMX is passing down a new VA for the queue
+		 * pair mapping.
+		 */
+		result = vmci_qp_broker_map(set_va_info.handle,
+					    vmci_host_dev->context,
+					    set_va_info.va);
+	} else {
+		/*
+		 * The queue pair is about to be unmapped by
+		 * the VMX.
+		 */
+		result = vmci_qp_broker_unmap(set_va_info.handle,
+					 vmci_host_dev->context, 0);
+	}
+
+	return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_queuepair_setpf(struct vmci_host_dev *vmci_host_dev,
+					const char *ioctl_name,
+					void __user *uptr)
+{
+	struct vmci_qp_page_file_info page_file_info;
+	struct vmci_qp_page_file_info __user *info = uptr;
+	s32 result;
+
+	if (vmci_host_dev->user_version < VMCI_VERSION_HOSTQP ||
+	    vmci_host_dev->user_version >= VMCI_VERSION_NOVMVM) {
+		vmci_ioctl_err("not supported on this VMX (version=%d)\n",
+			       vmci_host_dev->user_version);
+		return -EINVAL;
+	}
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&page_file_info, uptr, sizeof(*info)))
+		return -EFAULT;
+
+	/*
+	 * Communicate success pre-emptively to the caller.  Note that the
+	 * basic premise is that it is incumbent upon the caller not to look at
+	 * the info.result field until after the ioctl() returns.  And then,
+	 * only if the ioctl() result indicates no error.  We send up the
+	 * SUCCESS status before calling SetPageStore() because failing
+	 * to copy up the result code means unwinding the SetPageStore().
+	 *
+	 * It turns out the logic to unwind a SetPageStore() opens a can of
+	 * worms.  For example, if a host had created the queue_pair and a
+	 * guest attaches and SetPageStore() is successful but writing success
+	 * fails, then ... the host has to be stopped from writing (anymore)
+	 * data into the queue_pair.  That means an additional test in the
+	 * VMCI_Enqueue() code path.  Ugh.
+	 */
+
+	if (put_user(VMCI_SUCCESS, &info->result)) {
+		/*
+		 * In this case, we can't write a result field of the
+		 * caller's info block.  So, we don't even try to
+		 * SetPageStore().
+		 */
+		return -EFAULT;
+	}
+
+	result = vmci_qp_broker_set_page_store(page_file_info.handle,
+						page_file_info.produce_va,
+						page_file_info.consume_va,
+						vmci_host_dev->context);
+	if (result < VMCI_SUCCESS) {
+		if (put_user(result, &info->result)) {
+			/*
+			 * Note that in this case the SetPageStore()
+			 * call failed but we were unable to
+			 * communicate that to the caller (because the
+			 * copy_to_user() call failed).  So, if we
+			 * simply return an error (in this case
+			 * -EFAULT) then the caller will know that the
+			 * SetPageStore failed even though we couldn't
+			 * put the result code in the result field and
+			 * indicate exactly why it failed.
+			 *
+			 * That says nothing about the issue where we
+			 * were once able to write to the caller's info
+			 * memory and now can't.  Something more
+			 * serious is probably going on than the fact
+			 * that SetPageStore() didn't work.
+			 */
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int vmci_host_do_qp_detach(struct vmci_host_dev *vmci_host_dev,
+				  const char *ioctl_name,
+				  void __user *uptr)
+{
+	struct vmci_qp_dtch_info detach_info;
+	struct vmci_qp_dtch_info __user *info = uptr;
+	s32 result;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&detach_info, uptr, sizeof(detach_info)))
+		return -EFAULT;
+
+	result = vmci_qp_broker_detach(detach_info.handle,
+				       vmci_host_dev->context);
+	if (result == VMCI_SUCCESS &&
+	    vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
+		result = VMCI_SUCCESS_LAST_DETACH;
+	}
+
+	return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_add_notify(struct vmci_host_dev *vmci_host_dev,
+				       const char *ioctl_name,
+				       void __user *uptr)
+{
+	struct vmci_ctx_info ar_info;
+	struct vmci_ctx_info __user *info = uptr;
+	s32 result;
+	u32 cid;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+		return -EFAULT;
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+	result = vmci_ctx_add_notification(cid, ar_info.remote_cid);
+
+	return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_remove_notify(struct vmci_host_dev *vmci_host_dev,
+					  const char *ioctl_name,
+					  void __user *uptr)
+{
+	struct vmci_ctx_info ar_info;
+	struct vmci_ctx_info __user *info = uptr;
+	u32 cid;
+	int result;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&ar_info, uptr, sizeof(ar_info)))
+		return -EFAULT;
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+	result = vmci_ctx_remove_notification(cid,
+					      ar_info.remote_cid);
+
+	return put_user(result, &info->result) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_get_cpt_state(struct vmci_host_dev *vmci_host_dev,
+					  const char *ioctl_name,
+					  void __user *uptr)
+{
+	struct vmci_ctx_chkpt_buf_info get_info;
+	u32 cid;
+	void *cpt_buf;
+	int retval;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&get_info, uptr, sizeof(get_info)))
+		return -EFAULT;
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+	get_info.result = vmci_ctx_get_chkpt_state(cid, get_info.cpt_type,
+						&get_info.buf_size, &cpt_buf);
+	if (get_info.result == VMCI_SUCCESS && get_info.buf_size) {
+		void __user *ubuf = (void __user *)(uintptr_t)get_info.cpt_buf;
+		retval = copy_to_user(ubuf, cpt_buf, get_info.buf_size);
+		kfree(cpt_buf);
+
+		if (retval)
+			return -EFAULT;
+	}
+
+	return copy_to_user(uptr, &get_info, sizeof(get_info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_ctx_set_cpt_state(struct vmci_host_dev *vmci_host_dev,
+					  const char *ioctl_name,
+					  void __user *uptr)
+{
+	struct vmci_ctx_chkpt_buf_info set_info;
+	u32 cid;
+	void *cpt_buf;
+	int retval;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&set_info, uptr, sizeof(set_info)))
+		return -EFAULT;
+
+	cpt_buf = kmalloc(set_info.buf_size, GFP_KERNEL);
+	if (!cpt_buf) {
+		vmci_ioctl_err(
+			"cannot allocate memory to set cpt state (type=%d)\n",
+			set_info.cpt_type);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(cpt_buf, (void __user *)(uintptr_t)set_info.cpt_buf,
+			   set_info.buf_size)) {
+		retval = -EFAULT;
+		goto out;
+	}
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+	set_info.result = vmci_ctx_set_chkpt_state(cid, set_info.cpt_type,
+						   set_info.buf_size, cpt_buf);
+
+	retval = copy_to_user(uptr, &set_info, sizeof(set_info)) ? -EFAULT : 0;
+
+out:
+	kfree(cpt_buf);
+	return retval;
+}
+
+static int vmci_host_do_get_context_id(struct vmci_host_dev *vmci_host_dev,
+				       const char *ioctl_name,
+				       void __user *uptr)
+{
+	u32 __user *u32ptr = uptr;
+
+	return put_user(VMCI_HOST_CONTEXT_ID, u32ptr) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_set_notify(struct vmci_host_dev *vmci_host_dev,
+				   const char *ioctl_name,
+				   void __user *uptr)
+{
+	struct vmci_set_notify_info notify_info;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&notify_info, uptr, sizeof(notify_info)))
+		return -EFAULT;
+
+	if (notify_info.notify_uva) {
+		notify_info.result =
+			vmci_host_setup_notify(vmci_host_dev->context,
+					       notify_info.notify_uva);
+	} else {
+		vmci_ctx_unset_notify(vmci_host_dev->context);
+		notify_info.result = VMCI_SUCCESS;
+	}
+
+	return copy_to_user(uptr, &notify_info, sizeof(notify_info)) ?
+		-EFAULT : 0;
+}
+
+static int vmci_host_do_notify_resource(struct vmci_host_dev *vmci_host_dev,
+					const char *ioctl_name,
+					void __user *uptr)
+{
+	struct vmci_dbell_notify_resource_info info;
+	u32 cid;
+
+	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+		vmci_ioctl_err("invalid for current VMX versions\n");
+		return -EINVAL;
+	}
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&info, uptr, sizeof(info)))
+		return -EFAULT;
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+	switch (info.action) {
+	case VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY:
+		if (info.resource == VMCI_NOTIFY_RESOURCE_DOOR_BELL) {
+			u32 flags = VMCI_NO_PRIVILEGE_FLAGS;
+			info.result = vmci_ctx_notify_dbell(cid, info.handle,
+							    flags);
+		} else {
+			info.result = VMCI_ERROR_UNAVAILABLE;
+		}
+		break;
+
+	case VMCI_NOTIFY_RESOURCE_ACTION_CREATE:
+		info.result = vmci_ctx_dbell_create(cid, info.handle);
+		break;
+
+	case VMCI_NOTIFY_RESOURCE_ACTION_DESTROY:
+		info.result = vmci_ctx_dbell_destroy(cid, info.handle);
+		break;
+
+	default:
+		vmci_ioctl_err("got unknown action (action=%d)\n",
+			       info.action);
+		info.result = VMCI_ERROR_INVALID_ARGS;
+	}
+
+	return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+}
+
+static int vmci_host_do_recv_notifications(struct vmci_host_dev *vmci_host_dev,
+					   const char *ioctl_name,
+					   void __user *uptr)
+{
+	struct vmci_ctx_notify_recv_info info;
+	struct vmci_handle_arr *db_handle_array;
+	struct vmci_handle_arr *qp_handle_array;
+	void __user *ubuf;
+	u32 cid;
+	int retval = 0;
+
+	if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
+		vmci_ioctl_err("only valid for contexts\n");
+		return -EINVAL;
+	}
+
+	if (vmci_host_dev->user_version < VMCI_VERSION_NOTIFY) {
+		vmci_ioctl_err("not supported for the current vmx version\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&info, uptr, sizeof(info)))
+		return -EFAULT;
+
+	if ((info.db_handle_buf_size && !info.db_handle_buf_uva) ||
+	    (info.qp_handle_buf_size && !info.qp_handle_buf_uva)) {
+		return -EINVAL;
+	}
+
+	cid = vmci_ctx_get_id(vmci_host_dev->context);
+
+	info.result = vmci_ctx_rcv_notifications_get(cid,
+				&db_handle_array, &qp_handle_array);
+	if (info.result != VMCI_SUCCESS)
+		return copy_to_user(uptr, &info, sizeof(info)) ? -EFAULT : 0;
+
+	ubuf = (void __user *)(uintptr_t)info.db_handle_buf_uva;
+	info.result = drv_cp_harray_to_user(ubuf, &info.db_handle_buf_size,
+					    db_handle_array, &retval);
+	if (info.result == VMCI_SUCCESS && !retval) {
+		ubuf = (void __user *)(uintptr_t)info.qp_handle_buf_uva;
+		info.result = drv_cp_harray_to_user(ubuf,
+						    &info.qp_handle_buf_size,
+						    qp_handle_array, &retval);
+	}
+
+	if (!retval && copy_to_user(uptr, &info, sizeof(info)))
+		retval = -EFAULT;
+
+	vmci_ctx_rcv_notifications_release(cid,
+				db_handle_array, qp_handle_array,
+				info.result == VMCI_SUCCESS && !retval);
+
+	return retval;
+}
+
+static long vmci_host_unlocked_ioctl(struct file *filp,
+				     unsigned int iocmd, unsigned long ioarg)
+{
+#define VMCI_DO_IOCTL(ioctl_name, ioctl_fn) do {			\
+		char *name = __stringify(IOCTL_VMCI_ ## ioctl_name);	\
+		return vmci_host_do_ ## ioctl_fn(			\
+			vmci_host_dev, name, uptr);			\
+	} while (0)
+
+	struct vmci_host_dev *vmci_host_dev = filp->private_data;
+	void __user *uptr = (void __user *)ioarg;
+
+	switch (iocmd) {
+	case IOCTL_VMCI_INIT_CONTEXT:
+		VMCI_DO_IOCTL(INIT_CONTEXT, init_context);
+	case IOCTL_VMCI_DATAGRAM_SEND:
+		VMCI_DO_IOCTL(DATAGRAM_SEND, send_datagram);
+	case IOCTL_VMCI_DATAGRAM_RECEIVE:
+		VMCI_DO_IOCTL(DATAGRAM_RECEIVE, receive_datagram);
+	case IOCTL_VMCI_QUEUEPAIR_ALLOC:
+		VMCI_DO_IOCTL(QUEUEPAIR_ALLOC, alloc_queuepair);
+	case IOCTL_VMCI_QUEUEPAIR_SETVA:
+		VMCI_DO_IOCTL(QUEUEPAIR_SETVA, queuepair_setva);
+	case IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE:
+		VMCI_DO_IOCTL(QUEUEPAIR_SETPAGEFILE, queuepair_setpf);
+	case IOCTL_VMCI_QUEUEPAIR_DETACH:
+		VMCI_DO_IOCTL(QUEUEPAIR_DETACH, qp_detach);
+	case IOCTL_VMCI_CTX_ADD_NOTIFICATION:
+		VMCI_DO_IOCTL(CTX_ADD_NOTIFICATION, ctx_add_notify);
+	case IOCTL_VMCI_CTX_REMOVE_NOTIFICATION:
+		VMCI_DO_IOCTL(CTX_REMOVE_NOTIFICATION, ctx_remove_notify);
+	case IOCTL_VMCI_CTX_GET_CPT_STATE:
+		VMCI_DO_IOCTL(CTX_GET_CPT_STATE, ctx_get_cpt_state);
+	case IOCTL_VMCI_CTX_SET_CPT_STATE:
+		VMCI_DO_IOCTL(CTX_SET_CPT_STATE, ctx_set_cpt_state);
+	case IOCTL_VMCI_GET_CONTEXT_ID:
+		VMCI_DO_IOCTL(GET_CONTEXT_ID, get_context_id);
+	case IOCTL_VMCI_SET_NOTIFY:
+		VMCI_DO_IOCTL(SET_NOTIFY, set_notify);
+	case IOCTL_VMCI_NOTIFY_RESOURCE:
+		VMCI_DO_IOCTL(NOTIFY_RESOURCE, notify_resource);
+	case IOCTL_VMCI_NOTIFICATIONS_RECEIVE:
+		VMCI_DO_IOCTL(NOTIFICATIONS_RECEIVE, recv_notifications);
+
+	case IOCTL_VMCI_VERSION:
+	case IOCTL_VMCI_VERSION2:
+		return vmci_host_get_version(vmci_host_dev, iocmd, uptr);
+
+	default:
+		pr_devel("%s: Unknown ioctl (iocmd=%d)\n", __func__, iocmd);
+		return -EINVAL;
+	}
+
+#undef VMCI_DO_IOCTL
+}
+
+static const struct file_operations vmuser_fops = {
+	.owner		= THIS_MODULE,
+	.open		= vmci_host_open,
+	.release	= vmci_host_close,
+	.poll		= vmci_host_poll,
+	.unlocked_ioctl	= vmci_host_unlocked_ioctl,
+	.compat_ioctl	= vmci_host_unlocked_ioctl,
+};
+
+static struct miscdevice vmci_host_miscdev = {
+	 .name = "vmci",
+	 .minor = MISC_DYNAMIC_MINOR,
+	 .fops = &vmuser_fops,
+};
+
+int __init vmci_host_init(void)
+{
+	int error;
+
+	host_context = vmci_ctx_create(VMCI_HOST_CONTEXT_ID,
+					VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS,
+					-1, VMCI_VERSION, NULL);
+	if (IS_ERR(host_context)) {
+		error = PTR_ERR(host_context);
+		pr_warn("Failed to initialize VMCIContext (error%d)\n",
+			error);
+		return error;
+	}
+
+	error = misc_register(&vmci_host_miscdev);
+	if (error) {
+		pr_warn("Module registration error (name=%s, major=%d, minor=%d, err=%d)\n",
+			vmci_host_miscdev.name,
+			MISC_MAJOR, vmci_host_miscdev.minor,
+			error);
+		pr_warn("Unable to initialize host personality\n");
+		vmci_ctx_destroy(host_context);
+		return error;
+	}
+
+	pr_info("VMCI host device registered (name=%s, major=%d, minor=%d)\n",
+		vmci_host_miscdev.name, MISC_MAJOR, vmci_host_miscdev.minor);
+
+	vmci_host_device_initialized = true;
+	return 0;
+}
+
+void __exit vmci_host_exit(void)
+{
+	int error;
+
+	vmci_host_device_initialized = false;
+
+	error = misc_deregister(&vmci_host_miscdev);
+	if (error)
+		pr_warn("Error unregistering character device: %d\n", error);
+
+	vmci_ctx_destroy(host_context);
+	vmci_qp_broker_exit();
+
+	pr_debug("VMCI host driver module unloaded\n");
+}
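For reference (not part of the patch): the VMCI_DO_IOCTL() helper in vmci_host_unlocked_ioctl() above merely passes the ioctl name through __stringify() and tail-calls the matching handler, so a single case expands roughly as shown here.

	case IOCTL_VMCI_INIT_CONTEXT:
		/* VMCI_DO_IOCTL(INIT_CONTEXT, init_context) becomes: */
		do {
			char *name = __stringify(IOCTL_VMCI_INIT_CONTEXT);
			return vmci_host_do_init_context(vmci_host_dev,
							 name, uptr);
		} while (0);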
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
new file mode 100644
index 0000000..d94245d
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -0,0 +1,3425 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <linux/wait.h>
+#include <linux/vmalloc.h>
+
+#include "vmci_handle_array.h"
+#include "vmci_queue_pair.h"
+#include "vmci_datagram.h"
+#include "vmci_resource.h"
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_event.h"
+#include "vmci_route.h"
+
+/*
+ * In the following, we will distinguish between two kinds of VMX processes -
+ * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
+ * VMCI page files in the VMX and supporting VM to VM communication and the
+ * newer ones that use the guest memory directly. We will in the following
+ * refer to the older VMX versions as old-style VMX'en, and the newer ones as
+ * new-style VMX'en.
+ *
+ * The state transition diagram is as follows (the VMCIQPB_ prefix has been
+ * removed for readability) - see below for more details on the transitions:
+ *
+ *            --------------  NEW  -------------
+ *            |                                |
+ *           \_/                              \_/
+ *     CREATED_NO_MEM <-----------------> CREATED_MEM
+ *            |    |                           |
+ *            |    o-----------------------o   |
+ *            |                            |   |
+ *           \_/                          \_/ \_/
+ *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
+ *            |                            |   |
+ *            |     o----------------------o   |
+ *            |     |                          |
+ *           \_/   \_/                        \_/
+ *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
+ *            |                                |
+ *            |                                |
+ *            -------------> gone <-------------
+ *
+ * In more detail. When a VMCI queue pair is first created, it will be in the
+ * VMCIQPB_NEW state. It will then move into one of the following states:
+ *
+ * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
+ *
+ *     - the create was performed by a host endpoint, in which case there is
+ *       no backing memory yet.
+ *
+ *     - the create was initiated by an old-style VMX, that uses
+ *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
+ *       a later point in time. This state can be distinguished from the one
+ *       above by the context ID of the creator. A host side is not allowed to
+ *       attach until the page store has been set.
+ *
+ * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
+ *     is created by a VMX using the queue pair device backend that
+ *     sets the UVAs of the queue pair immediately and stores the
+ *     information for later attachers. At this point, it is ready for
+ *     the host side to attach to it.
+ *
+ * Once the queue pair is in one of the created states (with the exception of
+ * the case mentioned for older VMX'en above), it is possible to attach to the
+ * queue pair. Again we have two new states possible:
+ *
+ * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
+ *   paths:
+ *
+ *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
+ *       pair, and attaches to a queue pair previously created by the host side.
+ *
+ *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
+ *       already created by a guest.
+ *
+ *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
+ *       vmci_qp_broker_set_page_store (see below).
+ *
+ * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
+ *     VMCIQPB_CREATED_NO_MEM due to a host side create, an old-style VMX will
+ *     bring the queue pair into this state. Once vmci_qp_broker_set_page_store
+ *     is called to register the user memory, the VMCIQPB_ATTACHED_MEM state
+ *     will be entered.
+ *
+ * From the attached queue pair, the queue pair can enter the shutdown states
+ * when either side of the queue pair detaches. If the guest side detaches
+ * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
+ * the content of the queue pair will no longer be available. If the host
+ * side detaches first, the queue pair will either enter the
+ * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
+ * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
+ * (e.g., the host detaches while a guest is stunned).
+ *
+ * New-style VMX'en will also unmap guest memory, if the guest is
+ * quiesced, e.g., during a snapshot operation. In that case, the guest
+ * memory will no longer be available, and the queue pair will transition from
+ * *_MEM state to a *_NO_MEM state. The VMX may later map the memory once more,
+ * in which case the queue pair will transition from the *_NO_MEM state at that
+ * point back to the *_MEM state. Note that the *_NO_MEM state may have changed,
+ * since the peer may have either attached or detached in the meantime. The
+ * values are laid out such that ++ on a state will move from a *_NO_MEM to a
+ * *_MEM state, and vice versa.
+ */
+
+/*
+ * VMCIMemcpy{To,From}QueueFunc() prototypes.  Functions of these
+ * types are passed around to enqueue and dequeue routines.  Note that
+ * often the functions passed are simply wrappers around memcpy
+ * itself.
+ *
+ * Note: In order for the memcpy typedefs to be compatible with the VMKernel,
+ * there's an unused last parameter for the hosted side.  In
+ * ESX, that parameter holds a buffer type.
+ */
+typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
+				      u64 queue_offset, const void *src,
+				      size_t src_offset, size_t size);
+typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
+					const struct vmci_queue *queue,
+					u64 queue_offset, size_t size);
+
+/* The Kernel specific component of the struct vmci_queue structure. */
+struct vmci_queue_kern_if {
+	struct page **page;
+	struct page **header_page;
+	void *va;
+	struct mutex __mutex;	/* Protects the queue. */
+	struct mutex *mutex;	/* Shared by producer and consumer queues. */
+	bool host;
+	size_t num_pages;
+	bool mapped;
+};
+
+/*
+ * This structure is opaque to the clients.
+ */
+struct vmci_qp {
+	struct vmci_handle handle;
+	struct vmci_queue *produce_q;
+	struct vmci_queue *consume_q;
+	u64 produce_q_size;
+	u64 consume_q_size;
+	u32 peer;
+	u32 flags;
+	u32 priv_flags;
+	bool guest_endpoint;
+	unsigned int blocked;
+	unsigned int generation;
+	wait_queue_head_t event;
+};
+
+enum qp_broker_state {
+	VMCIQPB_NEW,
+	VMCIQPB_CREATED_NO_MEM,
+	VMCIQPB_CREATED_MEM,
+	VMCIQPB_ATTACHED_NO_MEM,
+	VMCIQPB_ATTACHED_MEM,
+	VMCIQPB_SHUTDOWN_NO_MEM,
+	VMCIQPB_SHUTDOWN_MEM,
+	VMCIQPB_GONE
+};
+
+#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
+				     _qpb->state == VMCIQPB_ATTACHED_MEM || \
+				     _qpb->state == VMCIQPB_SHUTDOWN_MEM)
+
+/*
+ * In the queue pair broker, we always use the guest point of view for
+ * the produce and consume queue values and references, e.g., the
+ * produce queue size stored is the guest's produce queue size. The
+ * host endpoint will need to swap these around. The only exception is
+ * the local queue pairs on the host, in which case the host endpoint
+ * that creates the queue pair will have the right orientation, and
+ * the attaching host endpoint will need to swap.
+ */
+struct qp_entry {
+	struct list_head list_item;
+	struct vmci_handle handle;
+	u32 peer;
+	u32 flags;
+	u64 produce_size;
+	u64 consume_size;
+	u32 ref_count;
+};
+
+struct qp_broker_entry {
+	struct vmci_resource resource;
+	struct qp_entry qp;
+	u32 create_id;
+	u32 attach_id;
+	enum qp_broker_state state;
+	bool require_trusted_attach;
+	bool created_by_trusted;
+	bool vmci_page_files;	/* Created by VMX using VMCI page files */
+	struct vmci_queue *produce_q;
+	struct vmci_queue *consume_q;
+	struct vmci_queue_header saved_produce_q;
+	struct vmci_queue_header saved_consume_q;
+	vmci_event_release_cb wakeup_cb;
+	void *client_data;
+	void *local_mem;	/* Kernel memory for local queue pair */
+};
+
+struct qp_guest_endpoint {
+	struct vmci_resource resource;
+	struct qp_entry qp;
+	u64 num_ppns;
+	void *produce_q;
+	void *consume_q;
+	struct ppn_set ppn_set;
+};
+
+struct qp_list {
+	struct list_head head;
+	struct mutex mutex;	/* Protect queue list. */
+};
+
+static struct qp_list qp_broker_list = {
+	.head = LIST_HEAD_INIT(qp_broker_list.head),
+	.mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
+};
+
+static struct qp_list qp_guest_endpoints = {
+	.head = LIST_HEAD_INIT(qp_guest_endpoints.head),
+	.mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
+};
+
+#define INVALID_VMCI_GUEST_MEM_ID  0
+#define QPE_NUM_PAGES(_QPE) ((u32) \
+			     (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
+			      DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
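+/*
+ * Example (assuming 4 KiB pages): an entry with produce_size = 8192 and
+ * consume_size = 4096 spans QPE_NUM_PAGES = 2 + 1 + 2 = 5 pages, the
+ * final two being the produce and consume queue headers.
+ */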
+
+
+/*
+ * Frees kernel VA space for a given queue and its queue header, and
+ * frees physical data pages.
+ */
+static void qp_free_queue(void *q, u64 size)
+{
+	struct vmci_queue *queue = q;
+
+	if (queue) {
+		u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+
+		if (queue->kernel_if->mapped) {
+			vunmap(queue->kernel_if->va);
+			queue->kernel_if->va = NULL;
+		}
+
+		while (i)
+			__free_page(queue->kernel_if->page[--i]);
+
+		vfree(queue->q_header);
+	}
+}
+
+/*
+ * Allocates kernel VA space of specified size, plus space for the
+ * queue structure/kernel interface and the queue header.  Allocates
+ * physical pages for the queue data pages.
+ *
+ * PAGE m:      struct vmci_queue_header (struct vmci_queue->q_header)
+ * PAGE m+1:    struct vmci_queue
+ * PAGE m+1+q:  struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
+ * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ */
+static void *qp_alloc_queue(u64 size, u32 flags)
+{
+	u64 i;
+	struct vmci_queue *queue;
+	struct vmci_queue_header *q_header;
+	const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+	const uint queue_size =
+	    PAGE_SIZE +
+	    sizeof(*queue) + sizeof(*(queue->kernel_if)) +
+	    num_data_pages * sizeof(*(queue->kernel_if->page));
+
+	q_header = vmalloc(queue_size);
+	if (!q_header)
+		return NULL;
+
+	queue = (void *)q_header + PAGE_SIZE;
+	queue->q_header = q_header;
+	queue->saved_header = NULL;
+	queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
+	queue->kernel_if->header_page = NULL;	/* Unused in guest. */
+	queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+	queue->kernel_if->host = false;
+	queue->kernel_if->va = NULL;
+	queue->kernel_if->mapped = false;
+
+	for (i = 0; i < num_data_pages; i++) {
+		queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
+		if (!queue->kernel_if->page[i])
+			goto fail;
+	}
+
+	if (vmci_qp_pinned(flags)) {
+		queue->kernel_if->va =
+		    vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
+			 PAGE_KERNEL);
+		if (!queue->kernel_if->va)
+			goto fail;
+
+		queue->kernel_if->mapped = true;
+	}
+
+	return (void *)queue;
+
+ fail:
+	qp_free_queue(queue, i * PAGE_SIZE);
+	return NULL;
+}
+
+/*
+ * Copies from a given buffer or iovector to a VMCI Queue.  Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_to_queue(struct vmci_queue *queue,
+				u64 queue_offset,
+				const void *src,
+				size_t size,
+				bool is_iovec)
+{
+	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+	size_t bytes_copied = 0;
+
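+	/*
+	 * Walk the copy one page at a time.  For example, assuming 4 KiB
+	 * pages, a queue_offset of 5000 starts at page_index 1 with a
+	 * page_offset of 904.
+	 */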
+	while (bytes_copied < size) {
+		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+		size_t page_offset =
+		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+		void *va;
+		size_t to_copy;
+
+		if (!kernel_if->mapped)
+			va = kmap(kernel_if->page[page_index]);
+		else
+			va = (void *)((u8 *)kernel_if->va +
+				      (page_index * PAGE_SIZE));
+
+		if (size - bytes_copied > PAGE_SIZE - page_offset)
+			/* Enough payload to fill up from this page. */
+			to_copy = PAGE_SIZE - page_offset;
+		else
+			to_copy = size - bytes_copied;
+
+		if (is_iovec) {
+			struct iovec *iov = (struct iovec *)src;
+			int err;
+
+			/* The iovec will track bytes_copied internally. */
+			err = memcpy_fromiovec((u8 *)va + page_offset,
+					       iov, to_copy);
+			if (err != 0) {
+				kunmap(kernel_if->page[page_index]);
+				return VMCI_ERROR_INVALID_ARGS;
+			}
+		} else {
+			memcpy((u8 *)va + page_offset,
+			       (u8 *)src + bytes_copied, to_copy);
+		}
+
+		bytes_copied += to_copy;
+		if (!kernel_if->mapped)
+			kunmap(kernel_if->page[page_index]);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Copies to a given buffer or iovector from a VMCI Queue.  Uses
+ * kmap()/kunmap() to dynamically map/unmap required portions of the queue
+ * by traversing the offset -> page translation structure for the queue.
+ * Assumes that offset + size does not wrap around in the queue.
+ */
+static int __qp_memcpy_from_queue(void *dest,
+				  const struct vmci_queue *queue,
+				  u64 queue_offset,
+				  size_t size,
+				  bool is_iovec)
+{
+	struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
+	size_t bytes_copied = 0;
+
+	while (bytes_copied < size) {
+		u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
+		size_t page_offset =
+		    (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
+		void *va;
+		size_t to_copy;
+
+		if (!kernel_if->mapped)
+			va = kmap(kernel_if->page[page_index]);
+		else
+			va = (void *)((u8 *)kernel_if->va +
+				      (page_index * PAGE_SIZE));
+
+		if (size - bytes_copied > PAGE_SIZE - page_offset)
+			/* Enough payload to fill up this page. */
+			to_copy = PAGE_SIZE - page_offset;
+		else
+			to_copy = size - bytes_copied;
+
+		if (is_iovec) {
+			struct iovec *iov = (struct iovec *)dest;
+			int err;
+
+			/* The iovec will track bytes_copied internally. */
+			err = memcpy_toiovec(iov, (u8 *)va + page_offset,
+					     to_copy);
+			if (err != 0) {
+				kunmap(kernel_if->page[page_index]);
+				return VMCI_ERROR_INVALID_ARGS;
+			}
+		} else {
+			memcpy((u8 *)dest + bytes_copied,
+			       (u8 *)va + page_offset, to_copy);
+		}
+
+		bytes_copied += to_copy;
+		if (!kernel_if->mapped)
+			kunmap(kernel_if->page[page_index]);
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Allocates two lists of PPNs --- one for the pages in the produce queue,
+ * and the other for the pages in the consume queue. Initializes the lists
+ * of PPNs with the page frame numbers of the KVA for the two queues (and
+ * the queue headers).
+ */
+static int qp_alloc_ppn_set(void *prod_q,
+			    u64 num_produce_pages,
+			    void *cons_q,
+			    u64 num_consume_pages, struct ppn_set *ppn_set)
+{
+	u32 *produce_ppns;
+	u32 *consume_ppns;
+	struct vmci_queue *produce_q = prod_q;
+	struct vmci_queue *consume_q = cons_q;
+	u64 i;
+
+	if (!produce_q || !num_produce_pages || !consume_q ||
+	    !num_consume_pages || !ppn_set)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (ppn_set->initialized)
+		return VMCI_ERROR_ALREADY_EXISTS;
+
+	produce_ppns =
+	    kmalloc(num_produce_pages * sizeof(*produce_ppns), GFP_KERNEL);
+	if (!produce_ppns)
+		return VMCI_ERROR_NO_MEM;
+
+	consume_ppns =
+	    kmalloc(num_consume_pages * sizeof(*consume_ppns), GFP_KERNEL);
+	if (!consume_ppns) {
+		kfree(produce_ppns);
+		return VMCI_ERROR_NO_MEM;
+	}
+
+	produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
+	for (i = 1; i < num_produce_pages; i++) {
+		unsigned long pfn;
+
+		produce_ppns[i] =
+		    page_to_pfn(produce_q->kernel_if->page[i - 1]);
+		pfn = produce_ppns[i];
+
+		/* Fail allocation if PFN isn't supported by hypervisor. */
+		if (sizeof(pfn) > sizeof(*produce_ppns)
+		    && pfn != produce_ppns[i])
+			goto ppn_error;
+	}
+
+	consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
+	for (i = 1; i < num_consume_pages; i++) {
+		unsigned long pfn;
+
+		consume_ppns[i] =
+		    page_to_pfn(consume_q->kernel_if->page[i - 1]);
+		pfn = consume_ppns[i];
+
+		/* Fail allocation if PFN isn't supported by hypervisor. */
+		if (sizeof(pfn) > sizeof(*consume_ppns)
+		    && pfn != consume_ppns[i])
+			goto ppn_error;
+	}
+
+	ppn_set->num_produce_pages = num_produce_pages;
+	ppn_set->num_consume_pages = num_consume_pages;
+	ppn_set->produce_ppns = produce_ppns;
+	ppn_set->consume_ppns = consume_ppns;
+	ppn_set->initialized = true;
+	return VMCI_SUCCESS;
+
+ ppn_error:
+	kfree(produce_ppns);
+	kfree(consume_ppns);
+	return VMCI_ERROR_INVALID_ARGS;
+}
+
+/*
+ * Frees the two lists of PPNs for a queue pair.
+ */
+static void qp_free_ppn_set(struct ppn_set *ppn_set)
+{
+	if (ppn_set->initialized) {
+		/* Do not call these functions on NULL inputs. */
+		kfree(ppn_set->produce_ppns);
+		kfree(ppn_set->consume_ppns);
+	}
+	memset(ppn_set, 0, sizeof(*ppn_set));
+}
+
+/*
+ * Populates the list of PPNs in the hypercall structure with the PPNs
+ * of the produce queue and the consume queue.
+ */
+static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
+{
+	memcpy(call_buf, ppn_set->produce_ppns,
+	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
+	memcpy(call_buf +
+	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
+	       ppn_set->consume_ppns,
+	       ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+
+	return VMCI_SUCCESS;
+}
+
+static int qp_memcpy_to_queue(struct vmci_queue *queue,
+			      u64 queue_offset,
+			      const void *src, size_t src_offset, size_t size)
+{
+	return __qp_memcpy_to_queue(queue, queue_offset,
+				    (u8 *)src + src_offset, size, false);
+}
+
+static int qp_memcpy_from_queue(void *dest,
+				size_t dest_offset,
+				const struct vmci_queue *queue,
+				u64 queue_offset, size_t size)
+{
+	return __qp_memcpy_from_queue((u8 *)dest + dest_offset,
+				      queue, queue_offset, size, false);
+}
+
+/*
+ * Copies from a given iovec to a VMCI Queue.
+ */
+static int qp_memcpy_to_queue_iov(struct vmci_queue *queue,
+				  u64 queue_offset,
+				  const void *src,
+				  size_t src_offset, size_t size)
+{
+
+	/*
+	 * We ignore src_offset because src is really a struct iovec * and will
+	 * maintain offset internally.
+	 */
+	return __qp_memcpy_to_queue(queue, queue_offset, src, size, true);
+}
+
+/*
+ * Copies to a given iovec from a VMCI Queue.
+ */
+static int qp_memcpy_from_queue_iov(void *dest,
+				    size_t dest_offset,
+				    const struct vmci_queue *queue,
+				    u64 queue_offset, size_t size)
+{
+	/*
+	 * We ignore dest_offset because dest is really a struct iovec * and
+	 * will maintain offset internally.
+	 */
+	return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true);
+}
+
+/*
+ * Allocates kernel VA space of specified size plus space for the queue
+ * and kernel interface.  This is different from the guest queue allocator,
+ * because we do not allocate our own queue header/data pages here but
+ * share those of the guest.
+ */
+static struct vmci_queue *qp_host_alloc_queue(u64 size)
+{
+	struct vmci_queue *queue;
+	const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
+	const size_t queue_page_size =
+	    num_pages * sizeof(*queue->kernel_if->page);
+
+	queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+	if (queue) {
+		queue->q_header = NULL;
+		queue->saved_header = NULL;
+		queue->kernel_if =
+		    (struct vmci_queue_kern_if *)((u8 *)queue +
+						  sizeof(*queue));
+		queue->kernel_if->host = true;
+		queue->kernel_if->mutex = NULL;
+		queue->kernel_if->num_pages = num_pages;
+		queue->kernel_if->header_page =
+		    (struct page **)((u8 *)queue + queue_size);
+		queue->kernel_if->page = &queue->kernel_if->header_page[1];
+		queue->kernel_if->va = NULL;
+		queue->kernel_if->mapped = false;
+	}
+
+	return queue;
+}
+
+/*
+ * Frees kernel memory for a given queue (header plus translation
+ * structure).
+ */
+static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
+{
+	kfree(queue);
+}
+
+/*
+ * Initialize the mutex for the pair of queues.  This mutex is used to
+ * protect the q_header and the buffer from changing out from under any
+ * users of either queue.  Of course, it's only any good if the mutexes
+ * are actually acquired.  The queue structure must lie in non-paged memory
+ * or we cannot guarantee access to the mutex.
+ */
+static void qp_init_queue_mutex(struct vmci_queue *produce_q,
+				struct vmci_queue *consume_q)
+{
+	/*
+	 * Only the host queue has shared state - the guest queues do not
+	 * need to synchronize access using a queue mutex.
+	 */
+
+	if (produce_q->kernel_if->host) {
+		produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+		consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
+		mutex_init(produce_q->kernel_if->mutex);
+	}
+}
+
+/*
+ * Cleans up the mutex for the pair of queues.
+ */
+static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
+				   struct vmci_queue *consume_q)
+{
+	if (produce_q->kernel_if->host) {
+		produce_q->kernel_if->mutex = NULL;
+		consume_q->kernel_if->mutex = NULL;
+	}
+}
+
+/*
+ * Acquire the mutex for the queue.  Note that the produce_q and
+ * the consume_q share a mutex.  So, only one of the two needs to
+ * be passed in to this routine.  Either will work just fine.
+ */
+static void qp_acquire_queue_mutex(struct vmci_queue *queue)
+{
+	if (queue->kernel_if->host)
+		mutex_lock(queue->kernel_if->mutex);
+}
+
+/*
+ * Release the mutex for the queue.  Note that the produce_q and
+ * the consume_q share a mutex.  So, only one of the two needs to
+ * be passed in to this routine.  Either will work just fine.
+ */
+static void qp_release_queue_mutex(struct vmci_queue *queue)
+{
+	if (queue->kernel_if->host)
+		mutex_unlock(queue->kernel_if->mutex);
+}
+
+/*
+ * Helper function to release user pages previously pinned with
+ * get_user_pages() (the produce/consume header_page arrays).
+ */
+static void qp_release_pages(struct page **pages,
+			     u64 num_pages, bool dirty)
+{
+	int i;
+
+	for (i = 0; i < num_pages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		page_cache_release(pages[i]);
+		pages[i] = NULL;
+	}
+}
+
+/*
+ * Lock the user pages referenced by the {produce,consume}Buffer
+ * struct into memory and populate the {produce,consume}Pages
+ * arrays in the attach structure with them.
+ */
+static int qp_host_get_user_memory(u64 produce_uva,
+				   u64 consume_uva,
+				   struct vmci_queue *produce_q,
+				   struct vmci_queue *consume_q)
+{
+	int retval;
+	int err = VMCI_SUCCESS;
+
+	down_write(&current->mm->mmap_sem);
+	retval = get_user_pages(current,
+				current->mm,
+				(uintptr_t) produce_uva,
+				produce_q->kernel_if->num_pages,
+				1, 0, produce_q->kernel_if->header_page, NULL);
+	if (retval < produce_q->kernel_if->num_pages) {
+		pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
+		qp_release_pages(produce_q->kernel_if->header_page, retval,
+				 false);
+		err = VMCI_ERROR_NO_MEM;
+		goto out;
+	}
+
+	retval = get_user_pages(current,
+				current->mm,
+				(uintptr_t) consume_uva,
+				consume_q->kernel_if->num_pages,
+				1, 0, consume_q->kernel_if->header_page, NULL);
+	if (retval < consume_q->kernel_if->num_pages) {
+		pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
+		qp_release_pages(consume_q->kernel_if->header_page, retval,
+				 false);
+		qp_release_pages(produce_q->kernel_if->header_page,
+				 produce_q->kernel_if->num_pages, false);
+		err = VMCI_ERROR_NO_MEM;
+	}
+
+ out:
+	up_write(&current->mm->mmap_sem);
+
+	return err;
+}
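+
+/*
+ * Note on the pinning above: get_user_pages() is called with write == 1,
+ * so pages pinned here may later be released with the dirty flag set (see
+ * qp_host_unregister_user_memory()), while the error paths above release
+ * them clean since nothing has been written to them yet.
+ */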
+
+/*
+ * Registers the specification of the user pages used for backing a queue
+ * pair. Enough information to map in pages is stored in the OS specific
+ * part of the struct vmci_queue structure.
+ */
+static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
+					struct vmci_queue *produce_q,
+					struct vmci_queue *consume_q)
+{
+	u64 produce_uva;
+	u64 consume_uva;
+
+	/*
+	 * The new style and the old style mapping differ only in
+	 * that we get either a single UVA or two UVAs, so we split the
+	 * single UVA range at the appropriate spot.
+	 */
+	produce_uva = page_store->pages;
+	consume_uva = page_store->pages +
+	    produce_q->kernel_if->num_pages * PAGE_SIZE;
+	return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
+				       consume_q);
+}
+
+/*
+ * Releases and removes the references to user pages stored in the attach
+ * struct.  Pages are released from the page cache and may become
+ * swappable again.
+ */
+static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
+					   struct vmci_queue *consume_q)
+{
+	qp_release_pages(produce_q->kernel_if->header_page,
+			 produce_q->kernel_if->num_pages, true);
+	memset(produce_q->kernel_if->header_page, 0,
+	       sizeof(*produce_q->kernel_if->header_page) *
+	       produce_q->kernel_if->num_pages);
+	qp_release_pages(consume_q->kernel_if->header_page,
+			 consume_q->kernel_if->num_pages, true);
+	memset(consume_q->kernel_if->header_page, 0,
+	       sizeof(*consume_q->kernel_if->header_page) *
+	       consume_q->kernel_if->num_pages);
+}
+
+/*
+ * Once qp_host_register_user_memory has been performed on a
+ * queue, the queue pair headers can be mapped into the
+ * kernel. Once mapped, they must be unmapped with
+ * qp_host_unmap_queues prior to calling
+ * qp_host_unregister_user_memory.
+ * Pages are pinned.
+ */
+static int qp_host_map_queues(struct vmci_queue *produce_q,
+			      struct vmci_queue *consume_q)
+{
+	int result;
+
+	if (!produce_q->q_header || !consume_q->q_header) {
+		struct page *headers[2];
+
+		if (produce_q->q_header != consume_q->q_header)
+			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+		if (produce_q->kernel_if->header_page == NULL ||
+		    *produce_q->kernel_if->header_page == NULL)
+			return VMCI_ERROR_UNAVAILABLE;
+
+		headers[0] = *produce_q->kernel_if->header_page;
+		headers[1] = *consume_q->kernel_if->header_page;
+
+		produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
+		if (produce_q->q_header != NULL) {
+			consume_q->q_header =
+			    (struct vmci_queue_header *)((u8 *)
+							 produce_q->q_header +
+							 PAGE_SIZE);
+			result = VMCI_SUCCESS;
+		} else {
+			pr_warn("vmap failed\n");
+			result = VMCI_ERROR_NO_MEM;
+		}
+	} else {
+		result = VMCI_SUCCESS;
+	}
+
+	return result;
+}
+
+/*
+ * Unmaps previously mapped queue pair headers from the kernel.
+ * Pages are unpinned.
+ */
+static int qp_host_unmap_queues(u32 gid,
+				struct vmci_queue *produce_q,
+				struct vmci_queue *consume_q)
+{
+	if (produce_q->q_header) {
+		if (produce_q->q_header < consume_q->q_header)
+			vunmap(produce_q->q_header);
+		else
+			vunmap(consume_q->q_header);
+
+		produce_q->q_header = NULL;
+		consume_q->q_header = NULL;
+	}
+
+	return VMCI_SUCCESS;
+}
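+
+/*
+ * The vunmap() above is issued on whichever of the two header pointers is
+ * lower: qp_host_map_queues() creates one contiguous two-page mapping, and
+ * depending on which endpoint mapped the pair (a host attacher sees the
+ * queues swapped), either header may be the base of that mapping.
+ */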
+
+/*
+ * Finds the entry in the list corresponding to a given handle. Assumes
+ * that the list is locked.
+ */
+static struct qp_entry *qp_list_find(struct qp_list *qp_list,
+				     struct vmci_handle handle)
+{
+	struct qp_entry *entry;
+
+	if (vmci_handle_is_invalid(handle))
+		return NULL;
+
+	list_for_each_entry(entry, &qp_list->head, list_item) {
+		if (vmci_handle_is_equal(entry->handle, handle))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_guest_endpoint *
+qp_guest_handle_to_entry(struct vmci_handle handle)
+{
+	struct qp_guest_endpoint *entry;
+	struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);
+
+	entry = qp ? container_of(qp, struct qp_guest_endpoint, qp) : NULL;
+	return entry;
+}
+
+/*
+ * Finds the entry in the list corresponding to a given handle.
+ */
+static struct qp_broker_entry *
+qp_broker_handle_to_entry(struct vmci_handle handle)
+{
+	struct qp_broker_entry *entry;
+	struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);
+
+	entry = qp ? container_of(qp, struct qp_broker_entry, qp) : NULL;
+	return entry;
+}
+
+/*
+ * Dispatches a queue pair event message directly into the local event
+ * queue.
+ */
+static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
+{
+	u32 context_id = vmci_get_context_id();
+	struct vmci_event_qp ev;
+
+	ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
+	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					  VMCI_CONTEXT_RESOURCE_ID);
+	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+	ev.msg.event_data.event =
+	    attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+	ev.payload.peer_id = context_id;
+	ev.payload.handle = handle;
+
+	return vmci_event_dispatch(&ev.msg.hdr);
+}
+
+/*
+ * Allocates and initializes a qp_guest_endpoint structure.
+ * Allocates a queue_pair rid (and handle) iff the given entry has
+ * an invalid handle.  0 through VMCI_RESERVED_RESOURCE_ID_MAX
+ * are reserved handles.  Assumes that the QP list mutex is held
+ * by the caller.
+ */
+static struct qp_guest_endpoint *
+qp_guest_endpoint_create(struct vmci_handle handle,
+			 u32 peer,
+			 u32 flags,
+			 u64 produce_size,
+			 u64 consume_size,
+			 void *produce_q,
+			 void *consume_q)
+{
+	int result;
+	struct qp_guest_endpoint *entry;
+	/* One page each for the queue headers. */
+	const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
+	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;
+
+	if (vmci_handle_is_invalid(handle)) {
+		u32 context_id = vmci_get_context_id();
+
+		handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (entry) {
+		entry->qp.peer = peer;
+		entry->qp.flags = flags;
+		entry->qp.produce_size = produce_size;
+		entry->qp.consume_size = consume_size;
+		entry->qp.ref_count = 0;
+		entry->num_ppns = num_ppns;
+		entry->produce_q = produce_q;
+		entry->consume_q = consume_q;
+		INIT_LIST_HEAD(&entry->qp.list_item);
+
+		/* Add resource obj */
+		result = vmci_resource_add(&entry->resource,
+					   VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+					   handle);
+		entry->qp.handle = vmci_resource_handle(&entry->resource);
+		if ((result != VMCI_SUCCESS) ||
+		    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
+			pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+				handle.context, handle.resource, result);
+			kfree(entry);
+			entry = NULL;
+		}
+	}
+	return entry;
+}
+
+/*
+ * Frees a qp_guest_endpoint structure.
+ */
+static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
+{
+	qp_free_ppn_set(&entry->ppn_set);
+	qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+	qp_free_queue(entry->produce_q, entry->qp.produce_size);
+	qp_free_queue(entry->consume_q, entry->qp.consume_size);
+	/* Unlink from resource hash table and free callback */
+	vmci_resource_remove(&entry->resource);
+
+	kfree(entry);
+}
+
+/*
+ * Helper to make a VMCI_QUEUEPAIR_ALLOC hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
+{
+	struct vmci_qp_alloc_msg *alloc_msg;
+	size_t msg_size;
+	int result;
+
+	if (!entry || entry->num_ppns <= 2)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	msg_size = sizeof(*alloc_msg) +
+	    (size_t) entry->num_ppns * sizeof(u32);
+	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
+	if (!alloc_msg)
+		return VMCI_ERROR_NO_MEM;
+
+	alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					      VMCI_QUEUEPAIR_ALLOC);
+	alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
+	alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
+	alloc_msg->handle = entry->qp.handle;
+	alloc_msg->peer = entry->qp.peer;
+	alloc_msg->flags = entry->qp.flags;
+	alloc_msg->produce_size = entry->qp.produce_size;
+	alloc_msg->consume_size = entry->qp.consume_size;
+	alloc_msg->num_ppns = entry->num_ppns;
+
+	result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
+				     &entry->ppn_set);
+	if (result == VMCI_SUCCESS)
+		result = vmci_send_datagram(&alloc_msg->hdr);
+
+	kfree(alloc_msg);
+
+	return result;
+}
+
+/*
+ * Helper to make a VMCI_QUEUEPAIR_DETACH hypercall when the driver is
+ * supporting a guest device.
+ */
+static int qp_detatch_hypercall(struct vmci_handle handle)
+{
+	struct vmci_qp_detach_msg detach_msg;
+
+	detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					      VMCI_QUEUEPAIR_DETACH);
+	detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
+	detach_msg.hdr.payload_size = sizeof(handle);
+	detach_msg.handle = handle;
+
+	return vmci_send_datagram(&detach_msg.hdr);
+}
+
+/*
+ * Adds the given entry to the list. Assumes that the list is locked.
+ */
+static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
+{
+	if (entry)
+		list_add(&entry->list_item, &qp_list->head);
+}
+
+/*
+ * Removes the given entry from the list. Assumes that the list is locked.
+ */
+static void qp_list_remove_entry(struct qp_list *qp_list,
+				 struct qp_entry *entry)
+{
+	if (entry)
+		list_del(&entry->list_item);
+}
+
+/*
+ * Helper for VMCI queue_pair detach interface. Frees the physical
+ * pages for the queue pair.
+ */
+static int qp_detatch_guest_work(struct vmci_handle handle)
+{
+	int result;
+	struct qp_guest_endpoint *entry;
+	u32 ref_count = ~0;	/* To avoid compiler warning below */
+
+	mutex_lock(&qp_guest_endpoints.mutex);
+
+	entry = qp_guest_handle_to_entry(handle);
+	if (!entry) {
+		mutex_unlock(&qp_guest_endpoints.mutex);
+		return VMCI_ERROR_NOT_FOUND;
+	}
+
+	if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+		result = VMCI_SUCCESS;
+
+		if (entry->qp.ref_count > 1) {
+			result = qp_notify_peer_local(false, handle);
+			/*
+			 * We can fail to notify a local queuepair
+			 * because we can't allocate.  We still want
+			 * to release the entry if that happens, so
+			 * don't bail out yet.
+			 */
+		}
+	} else {
+		result = qp_detatch_hypercall(handle);
+		if (result < VMCI_SUCCESS) {
+			/*
+			 * We failed to notify a non-local queuepair.
+			 * That other queuepair might still be
+			 * accessing the shared memory, so don't
+			 * release the entry yet.  It will get cleaned
+			 * up by vmci_qp_guest_endpoints_exit() if necessary
+			 * (assuming we are going away, otherwise why
+			 * did this fail?).
+			 */
+
+			mutex_unlock(&qp_guest_endpoints.mutex);
+			return result;
+		}
+	}
+
+	/*
+	 * If we get here then we either failed to notify a local queuepair, or
+	 * we succeeded in all cases.  Release the entry if required.
+	 */
+
+	entry->qp.ref_count--;
+	if (entry->qp.ref_count == 0)
+		qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);
+
+	/* If we didn't remove the entry, this could change once we unlock. */
+	if (entry)
+		ref_count = entry->qp.ref_count;
+
+	mutex_unlock(&qp_guest_endpoints.mutex);
+
+	if (ref_count == 0)
+		qp_guest_endpoint_destroy(entry);
+
+	return result;
+}
+
+/*
+ * This function handles the actual allocation of a VMCI queue
+ * pair guest endpoint. Allocates physical pages for the queue
+ * pair. It makes OS dependent calls through generic wrappers.
+ */
+static int qp_alloc_guest_work(struct vmci_handle *handle,
+			       struct vmci_queue **produce_q,
+			       u64 produce_size,
+			       struct vmci_queue **consume_q,
+			       u64 consume_size,
+			       u32 peer,
+			       u32 flags,
+			       u32 priv_flags)
+{
+	const u64 num_produce_pages =
+	    DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
+	const u64 num_consume_pages =
+	    DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
+	void *my_produce_q = NULL;
+	void *my_consume_q = NULL;
+	int result;
+	struct qp_guest_endpoint *queue_pair_entry = NULL;
+
+	if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
+		return VMCI_ERROR_NO_ACCESS;
+
+	mutex_lock(&qp_guest_endpoints.mutex);
+
+	queue_pair_entry = qp_guest_handle_to_entry(*handle);
+	if (queue_pair_entry) {
+		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+			/* Local attach case. */
+			if (queue_pair_entry->qp.ref_count > 1) {
+				pr_devel("Error attempting to attach more than once\n");
+				result = VMCI_ERROR_UNAVAILABLE;
+				goto error_keep_entry;
+			}
+
+			if (queue_pair_entry->qp.produce_size != consume_size ||
+			    queue_pair_entry->qp.consume_size !=
+			    produce_size ||
+			    queue_pair_entry->qp.flags !=
+			    (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
+				pr_devel("Error mismatched queue pair in local attach\n");
+				result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
+				goto error_keep_entry;
+			}
+
+			/*
+			 * Do a local attach.  We swap the consume and
+			 * produce queues for the attacher and deliver
+			 * an attach event.
+			 */
+			result = qp_notify_peer_local(true, *handle);
+			if (result < VMCI_SUCCESS)
+				goto error_keep_entry;
+
+			my_produce_q = queue_pair_entry->consume_q;
+			my_consume_q = queue_pair_entry->produce_q;
+			goto out;
+		}
+
+		result = VMCI_ERROR_ALREADY_EXISTS;
+		goto error_keep_entry;
+	}
+
+	my_produce_q = qp_alloc_queue(produce_size, flags);
+	if (!my_produce_q) {
+		pr_warn("Error allocating pages for produce queue\n");
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+
+	my_consume_q = qp_alloc_queue(consume_size, flags);
+	if (!my_consume_q) {
+		pr_warn("Error allocating pages for consume queue\n");
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+
+	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
+						    produce_size, consume_size,
+						    my_produce_q, my_consume_q);
+	if (!queue_pair_entry) {
+		pr_warn("Error allocating memory in %s\n", __func__);
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+
+	result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
+				  num_consume_pages,
+				  &queue_pair_entry->ppn_set);
+	if (result < VMCI_SUCCESS) {
+		pr_warn("qp_alloc_ppn_set failed\n");
+		goto error;
+	}
+
+	/*
+	 * It's only necessary to notify the host if this queue pair will be
+	 * attached to from another context.
+	 */
+	if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
+		/* Local create case. */
+		u32 context_id = vmci_get_context_id();
+
+		/*
+		 * Enforce similar checks on local queue pairs as we
+		 * do for regular ones.  The handle's context must
+		 * match the creator or attacher context id (here they
+		 * are both the current context id) and the
+		 * attach-only flag cannot exist during create.  We
+		 * also ensure specified peer is this context or an
+		 * invalid one.
+		 */
+		if (queue_pair_entry->qp.handle.context != context_id ||
+		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
+		     queue_pair_entry->qp.peer != context_id)) {
+			result = VMCI_ERROR_NO_ACCESS;
+			goto error;
+		}
+
+		if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
+			result = VMCI_ERROR_NOT_FOUND;
+			goto error;
+		}
+	} else {
+		result = qp_alloc_hypercall(queue_pair_entry);
+		if (result < VMCI_SUCCESS) {
+			pr_warn("qp_alloc_hypercall result = %d\n", result);
+			goto error;
+		}
+	}
+
+	qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
+			    (struct vmci_queue *)my_consume_q);
+
+	qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);
+
+ out:
+	queue_pair_entry->qp.ref_count++;
+	*handle = queue_pair_entry->qp.handle;
+	*produce_q = (struct vmci_queue *)my_produce_q;
+	*consume_q = (struct vmci_queue *)my_consume_q;
+
+	/*
+	 * We should initialize the queue pair header pages on a local
+	 * queue pair create.  For non-local queue pairs, the
+	 * hypervisor initializes the header pages in the create step.
+	 */
+	if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
+	    queue_pair_entry->qp.ref_count == 1) {
+		vmci_q_header_init((*produce_q)->q_header, *handle);
+		vmci_q_header_init((*consume_q)->q_header, *handle);
+	}
+
+	mutex_unlock(&qp_guest_endpoints.mutex);
+
+	return VMCI_SUCCESS;
+
+ error:
+	mutex_unlock(&qp_guest_endpoints.mutex);
+	if (queue_pair_entry) {
+		/* The queues will be freed inside the destroy routine. */
+		qp_guest_endpoint_destroy(queue_pair_entry);
+	} else {
+		qp_free_queue(my_produce_q, produce_size);
+		qp_free_queue(my_consume_q, consume_size);
+	}
+	return result;
+
+ error_keep_entry:
+	/* This path should only be used when an existing entry was found. */
+	mutex_unlock(&qp_guest_endpoints.mutex);
+	return result;
+}
+
+/*
+ * The first endpoint issuing a queue pair allocation will create the state
+ * of the queue pair in the queue pair broker.
+ *
+ * If the creator is a guest, it will associate a VMX virtual address range
+ * with the queue pair as specified by the page_store. For compatibility with
+ * older VMX'en, that would use a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later using
+ * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
+ * used.
+ *
+ * If the creator is the host, a page_store of NULL should be used as well,
+ * since the host is not able to supply a page store for the queue pair.
+ *
+ * For older VMX and host callers, the queue pair will be created in the
+ * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
+ * created in the VMCIQPB_CREATED_MEM state.
+ */
+static int qp_broker_create(struct vmci_handle handle,
+			    u32 peer,
+			    u32 flags,
+			    u32 priv_flags,
+			    u64 produce_size,
+			    u64 consume_size,
+			    struct vmci_qp_page_store *page_store,
+			    struct vmci_ctx *context,
+			    vmci_event_release_cb wakeup_cb,
+			    void *client_data, struct qp_broker_entry **ent)
+{
+	struct qp_broker_entry *entry = NULL;
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool is_local = flags & VMCI_QPFLAG_LOCAL;
+	int result;
+	u64 guest_produce_size;
+	u64 guest_consume_size;
+
+	/* Do not create if the caller asked not to. */
+	if (flags & VMCI_QPFLAG_ATTACH_ONLY)
+		return VMCI_ERROR_NOT_FOUND;
+
+	/*
+	 * Creator's context ID should match handle's context ID or the creator
+	 * must allow the context in handle's context ID as the "peer".
+	 */
+	if (handle.context != context_id && handle.context != peer)
+		return VMCI_ERROR_NO_ACCESS;
+
+	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
+		return VMCI_ERROR_DST_UNREACHABLE;
+
+	/*
+	 * Creator's context ID for local queue pairs should match the
+	 * peer, if a peer is specified.
+	 */
+	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
+		return VMCI_ERROR_NO_ACCESS;
+
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return VMCI_ERROR_NO_MEM;
+
+	if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
+		/*
+		 * The queue pair broker entry stores values from the guest
+		 * point of view, so a creating host side endpoint should swap
+		 * produce and consume values -- unless it is a local queue
+		 * pair, in which case no swapping is necessary, since the local
+		 * attacher will swap queues.
+		 */
+
+		guest_produce_size = consume_size;
+		guest_consume_size = produce_size;
+	} else {
+		guest_produce_size = produce_size;
+		guest_consume_size = consume_size;
+	}
+
+	entry->qp.handle = handle;
+	entry->qp.peer = peer;
+	entry->qp.flags = flags;
+	entry->qp.produce_size = guest_produce_size;
+	entry->qp.consume_size = guest_consume_size;
+	entry->qp.ref_count = 1;
+	entry->create_id = context_id;
+	entry->attach_id = VMCI_INVALID_ID;
+	entry->state = VMCIQPB_NEW;
+	entry->require_trusted_attach =
+	    !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
+	entry->created_by_trusted =
+	    !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
+	entry->vmci_page_files = false;
+	entry->wakeup_cb = wakeup_cb;
+	entry->client_data = client_data;
+	entry->produce_q = qp_host_alloc_queue(guest_produce_size);
+	if (entry->produce_q == NULL) {
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+	entry->consume_q = qp_host_alloc_queue(guest_consume_size);
+	if (entry->consume_q == NULL) {
+		result = VMCI_ERROR_NO_MEM;
+		goto error;
+	}
+
+	qp_init_queue_mutex(entry->produce_q, entry->consume_q);
+
+	INIT_LIST_HEAD(&entry->qp.list_item);
+
+	if (is_local) {
+		u8 *tmp;
+
+		entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
+					   PAGE_SIZE, GFP_KERNEL);
+		if (entry->local_mem == NULL) {
+			result = VMCI_ERROR_NO_MEM;
+			goto error;
+		}
+		entry->state = VMCIQPB_CREATED_MEM;
+		entry->produce_q->q_header = entry->local_mem;
+		tmp = (u8 *)entry->local_mem + PAGE_SIZE *
+		    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
+		entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
+	} else if (page_store) {
+		/*
+		 * The VMX already initialized the queue pair headers, so no
+		 * need for the kernel side to do that.
+		 */
+		result = qp_host_register_user_memory(page_store,
+						      entry->produce_q,
+						      entry->consume_q);
+		if (result < VMCI_SUCCESS)
+			goto error;
+
+		entry->state = VMCIQPB_CREATED_MEM;
+	} else {
+		/*
+		 * A create without a page_store may be either a host
+		 * side create (in which case we are waiting for the
+		 * guest side to supply the memory) or an old style
+		 * queue pair create (in which case we will expect a
+		 * set page store call as the next step).
+		 */
+		entry->state = VMCIQPB_CREATED_NO_MEM;
+	}
+
+	qp_list_add_entry(&qp_broker_list, &entry->qp);
+	if (ent != NULL)
+		*ent = entry;
+
+	/* Add to resource obj */
+	result = vmci_resource_add(&entry->resource,
+				   VMCI_RESOURCE_TYPE_QPAIR_HOST,
+				   handle);
+	if (result != VMCI_SUCCESS) {
+		pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
+			handle.context, handle.resource, result);
+		goto error;
+	}
+
+	entry->qp.handle = vmci_resource_handle(&entry->resource);
+	if (is_local) {
+		vmci_q_header_init(entry->produce_q->q_header,
+				   entry->qp.handle);
+		vmci_q_header_init(entry->consume_q->q_header,
+				   entry->qp.handle);
+	}
+
+	vmci_ctx_qp_create(context, entry->qp.handle);
+
+	return VMCI_SUCCESS;
+
+ error:
+	if (entry != NULL) {
+		qp_host_free_queue(entry->produce_q, guest_produce_size);
+		qp_host_free_queue(entry->consume_q, guest_consume_size);
+		kfree(entry);
+	}
+
+	return result;
+}
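+
+/*
+ * Rough sketch of the broker entry state transitions implemented in this
+ * file.  It assumes each VMCIQPB_*_NO_MEM enum value sits directly before
+ * its *_MEM counterpart, which the state++/state-- transitions in
+ * vmci_qp_broker_map() and vmci_qp_broker_unmap() below rely on:
+ *
+ *   VMCIQPB_NEW  -> VMCIQPB_CREATED_{NO_}MEM    qp_broker_create()
+ *   CREATED_*    -> VMCIQPB_ATTACHED_{NO_}MEM   qp_broker_attach()
+ *   *_NO_MEM     -> *_MEM                       set_page_store()/map()
+ *   *_MEM        -> *_NO_MEM                    unmap()
+ *   any created/attached state -> VMCIQPB_SHUTDOWN_{NO_}MEM on first detach
+ */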
+
+/*
+ * Enqueues an event datagram to notify the peer VM attached to
+ * the given queue pair handle about attach/detach event by the
+ * given VM.  Returns the payload size of the enqueued datagram on
+ * success, or an error code otherwise.
+ */
+static int qp_notify_peer(bool attach,
+			  struct vmci_handle handle,
+			  u32 my_id,
+			  u32 peer_id)
+{
+	int rv;
+	struct vmci_event_qp ev;
+
+	if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
+	    peer_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
+	 * number of pending events from the hypervisor to a given VM
+	 * otherwise a rogue VM could do an arbitrary number of attach
+	 * and detach operations causing memory pressure in the host
+	 * kernel.
+	 */
+
+	ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
+	ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
+					  VMCI_CONTEXT_RESOURCE_ID);
+	ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
+	ev.msg.event_data.event = attach ?
+	    VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
+	ev.payload.handle = handle;
+	ev.payload.peer_id = my_id;
+
+	rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
+				    &ev.msg.hdr, false);
+	if (rv < VMCI_SUCCESS)
+		pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
+			attach ? "ATTACH" : "DETACH", peer_id);
+
+	return rv;
+}
+
+/*
+ * The second endpoint issuing a queue pair allocation will attach to
+ * the queue pair registered with the queue pair broker.
+ *
+ * If the attacher is a guest, it will associate a VMX virtual address
+ * range with the queue pair as specified by the page_store. At this
+ * point, the already attached host endpoint may start using the queue
+ * pair, and an attach event is sent to it. For compatibility with
+ * older VMX'en, that used a separate step to set the VMX virtual
+ * address range, the virtual address range can be registered later
+ * using vmci_qp_broker_set_page_store. In that case, a page_store of
+ * NULL should be used, and the attach event will be generated once
+ * the actual page store has been set.
+ *
+ * If the attacher is the host, a page_store of NULL should be used as
+ * well, since the page store information is already set by the guest.
+ *
+ * For new VMX and host callers, the queue pair will be moved to the
+ * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
+ * moved to the VMCIQPB_ATTACHED_NO_MEM state.
+ */
+static int qp_broker_attach(struct qp_broker_entry *entry,
+			    u32 peer,
+			    u32 flags,
+			    u32 priv_flags,
+			    u64 produce_size,
+			    u64 consume_size,
+			    struct vmci_qp_page_store *page_store,
+			    struct vmci_ctx *context,
+			    vmci_event_release_cb wakeup_cb,
+			    void *client_data,
+			    struct qp_broker_entry **ent)
+{
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool is_local = flags & VMCI_QPFLAG_LOCAL;
+	int result;
+
+	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+	    entry->state != VMCIQPB_CREATED_MEM)
+		return VMCI_ERROR_UNAVAILABLE;
+
+	if (is_local) {
+		if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
+		    context_id != entry->create_id) {
+			return VMCI_ERROR_INVALID_ARGS;
+		}
+	} else if (context_id == entry->create_id ||
+		   context_id == entry->attach_id) {
+		return VMCI_ERROR_ALREADY_EXISTS;
+	}
+
+	if (VMCI_CONTEXT_IS_VM(context_id) &&
+	    VMCI_CONTEXT_IS_VM(entry->create_id))
+		return VMCI_ERROR_DST_UNREACHABLE;
+
+	/*
+	 * If we are attaching from a restricted context then the queuepair
+	 * must have been created by a trusted endpoint.
+	 */
+	if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
+	    !entry->created_by_trusted)
+		return VMCI_ERROR_NO_ACCESS;
+
+	/*
+	 * If we are attaching to a queuepair that was created by a restricted
+	 * context then we must be trusted.
+	 */
+	if (entry->require_trusted_attach &&
+	    (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
+		return VMCI_ERROR_NO_ACCESS;
+
+	/*
+	 * If the creator specifies VMCI_INVALID_ID in "peer" field, access
+	 * control check is not performed.
+	 */
+	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
+		return VMCI_ERROR_NO_ACCESS;
+
+	if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
+		/*
+		 * Do not attach if the caller doesn't support Host Queue Pairs
+		 * and a host created this queue pair.
+		 */
+
+		if (!vmci_ctx_supports_host_qp(context))
+			return VMCI_ERROR_INVALID_RESOURCE;
+
+	} else if (context_id == VMCI_HOST_CONTEXT_ID) {
+		struct vmci_ctx *create_context;
+		bool supports_host_qp;
+
+		/*
+		 * Do not attach a host to a user created queue pair if that
+		 * user doesn't support host queue pair end points.
+		 */
+
+		create_context = vmci_ctx_get(entry->create_id);
+		supports_host_qp = vmci_ctx_supports_host_qp(create_context);
+		vmci_ctx_put(create_context);
+
+		if (!supports_host_qp)
+			return VMCI_ERROR_INVALID_RESOURCE;
+	}
+
+	if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
+		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		/*
+		 * The queue pair broker entry stores values from the guest
+		 * point of view, so an attaching guest should match the values
+		 * stored in the entry.
+		 */
+
+		if (entry->qp.produce_size != produce_size ||
+		    entry->qp.consume_size != consume_size) {
+			return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+		}
+	} else if (entry->qp.produce_size != consume_size ||
+		   entry->qp.consume_size != produce_size) {
+		return VMCI_ERROR_QUEUEPAIR_MISMATCH;
+	}
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		/*
+		 * If a guest attached to a queue pair, it will supply
+		 * the backing memory.  If this is a pre NOVMVM vmx,
+		 * the backing memory will be supplied by calling
+		 * vmci_qp_broker_set_page_store() following the
+		 * return of the vmci_qp_broker_alloc() call. If it is
+		 * a vmx of version NOVMVM or later, the page store
+		 * must be supplied as part of the
+		 * vmci_qp_broker_alloc call.  Under all circumstances,
+		 * the initially created queue pair must not have any
+		 * memory associated with it already.
+		 */
+
+		if (entry->state != VMCIQPB_CREATED_NO_MEM)
+			return VMCI_ERROR_INVALID_ARGS;
+
+		if (page_store != NULL) {
+			/*
+			 * Patch up host state to point to guest
+			 * supplied memory. The VMX already
+			 * initialized the queue pair headers, so no
+			 * need for the kernel side to do that.
+			 */
+
+			result = qp_host_register_user_memory(page_store,
+							      entry->produce_q,
+							      entry->consume_q);
+			if (result < VMCI_SUCCESS)
+				return result;
+
+			/*
+			 * Preemptively load in the headers if non-blocking to
+			 * prevent blocking later.
+			 */
+			if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
+				result = qp_host_map_queues(entry->produce_q,
+							    entry->consume_q);
+				if (result < VMCI_SUCCESS) {
+					qp_host_unregister_user_memory(
+						entry->produce_q,
+						entry->consume_q);
+					return result;
+				}
+			}
+
+			entry->state = VMCIQPB_ATTACHED_MEM;
+		} else {
+			entry->state = VMCIQPB_ATTACHED_NO_MEM;
+		}
+	} else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
+		/*
+		 * The host side is attempting to attach to a queue
+		 * pair that doesn't have any memory associated with
+		 * it. This must be a pre NOVMVM vmx that hasn't set
+		 * the page store information yet, or a quiesced VM.
+		 */
+
+		return VMCI_ERROR_UNAVAILABLE;
+	} else {
+		/*
+		 * For non-blocking queue pairs, we cannot rely on
+		 * enqueue/dequeue to map in the pages on the
+		 * host-side, since it may block, so we make an
+		 * attempt here.
+		 */
+
+		if (flags & VMCI_QPFLAG_NONBLOCK) {
+			result =
+			    qp_host_map_queues(entry->produce_q,
+					       entry->consume_q);
+			if (result < VMCI_SUCCESS)
+				return result;
+
+			entry->qp.flags |= flags &
+			    (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
+		}
+
+		/* The host side has successfully attached to a queue pair. */
+		entry->state = VMCIQPB_ATTACHED_MEM;
+	}
+
+	if (entry->state == VMCIQPB_ATTACHED_MEM) {
+		result =
+		    qp_notify_peer(true, entry->qp.handle, context_id,
+				   entry->create_id);
+		if (result < VMCI_SUCCESS)
+			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+				entry->create_id, entry->qp.handle.context,
+				entry->qp.handle.resource);
+	}
+
+	entry->attach_id = context_id;
+	entry->qp.ref_count++;
+	if (wakeup_cb) {
+		entry->wakeup_cb = wakeup_cb;
+		entry->client_data = client_data;
+	}
+
+	/*
+	 * When attaching to local queue pairs, the context already has
+	 * an entry tracking the queue pair, so don't add another one.
+	 */
+	if (!is_local)
+		vmci_ctx_qp_create(context, entry->qp.handle);
+
+	if (ent != NULL)
+		*ent = entry;
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Queue pair allocation for use when setting up queue pair endpoints
+ * on the host.
+ */
+static int qp_broker_alloc(struct vmci_handle handle,
+			   u32 peer,
+			   u32 flags,
+			   u32 priv_flags,
+			   u64 produce_size,
+			   u64 consume_size,
+			   struct vmci_qp_page_store *page_store,
+			   struct vmci_ctx *context,
+			   vmci_event_release_cb wakeup_cb,
+			   void *client_data,
+			   struct qp_broker_entry **ent,
+			   bool *swap)
+{
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool create;
+	struct qp_broker_entry *entry = NULL;
+	bool is_local = flags & VMCI_QPFLAG_LOCAL;
+	int result;
+
+	if (vmci_handle_is_invalid(handle) ||
+	    (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
+	    !(produce_size || consume_size) ||
+	    !context || context_id == VMCI_INVALID_ID ||
+	    handle.context == VMCI_INVALID_ID) {
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * In the initial argument check, we ensure that non-vmkernel hosts
+	 * are not allowed to create local queue pairs.
+	 */
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!is_local && vmci_ctx_qp_exists(context, handle)) {
+		pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
+			 context_id, handle.context, handle.resource);
+		mutex_unlock(&qp_broker_list.mutex);
+		return VMCI_ERROR_ALREADY_EXISTS;
+	}
+
+	if (handle.resource != VMCI_INVALID_ID)
+		entry = qp_broker_handle_to_entry(handle);
+
+	if (!entry) {
+		create = true;
+		result =
+		    qp_broker_create(handle, peer, flags, priv_flags,
+				     produce_size, consume_size, page_store,
+				     context, wakeup_cb, client_data, ent);
+	} else {
+		create = false;
+		result =
+		    qp_broker_attach(entry, peer, flags, priv_flags,
+				     produce_size, consume_size, page_store,
+				     context, wakeup_cb, client_data, ent);
+	}
+
+	mutex_unlock(&qp_broker_list.mutex);
+
+	if (swap)
+		*swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
+		    !(create && is_local);
+
+	return result;
+}
+
+/*
+ * This function implements the kernel API for allocating a queue
+ * pair.
+ */
+static int qp_alloc_host_work(struct vmci_handle *handle,
+			      struct vmci_queue **produce_q,
+			      u64 produce_size,
+			      struct vmci_queue **consume_q,
+			      u64 consume_size,
+			      u32 peer,
+			      u32 flags,
+			      u32 priv_flags,
+			      vmci_event_release_cb wakeup_cb,
+			      void *client_data)
+{
+	struct vmci_handle new_handle;
+	struct vmci_ctx *context;
+	struct qp_broker_entry *entry;
+	int result;
+	bool swap;
+
+	if (vmci_handle_is_invalid(*handle)) {
+		new_handle = vmci_make_handle(
+			VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
+	} else
+		new_handle = *handle;
+
+	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+	entry = NULL;
+	result =
+	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
+			    produce_size, consume_size, NULL, context,
+			    wakeup_cb, client_data, &entry, &swap);
+	if (result == VMCI_SUCCESS) {
+		if (swap) {
+			/*
+			 * If this is a local queue pair, the attacher
+			 * will swap around produce and consume
+			 * queues.
+			 */
+
+			*produce_q = entry->consume_q;
+			*consume_q = entry->produce_q;
+		} else {
+			*produce_q = entry->produce_q;
+			*consume_q = entry->consume_q;
+		}
+
+		*handle = vmci_resource_handle(&entry->resource);
+	} else {
+		*handle = VMCI_INVALID_HANDLE;
+		pr_devel("queue pair broker failed to alloc (result=%d)\n",
+			 result);
+	}
+	vmci_ctx_put(context);
+	return result;
+}
+
+/*
+ * Allocates a VMCI queue_pair. Only checks validity of input
+ * arguments. The real work is done in the host or guest
+ * specific function.
+ */
+int vmci_qp_alloc(struct vmci_handle *handle,
+		  struct vmci_queue **produce_q,
+		  u64 produce_size,
+		  struct vmci_queue **consume_q,
+		  u64 consume_size,
+		  u32 peer,
+		  u32 flags,
+		  u32 priv_flags,
+		  bool guest_endpoint,
+		  vmci_event_release_cb wakeup_cb,
+		  void *client_data)
+{
+	if (!handle || !produce_q || !consume_q ||
+	    (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (guest_endpoint) {
+		return qp_alloc_guest_work(handle, produce_q,
+					   produce_size, consume_q,
+					   consume_size, peer,
+					   flags, priv_flags);
+	} else {
+		return qp_alloc_host_work(handle, produce_q,
+					  produce_size, consume_q,
+					  consume_size, peer, flags,
+					  priv_flags, wakeup_cb, client_data);
+	}
+}
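+
+/*
+ * Purely illustrative host-side call of vmci_qp_alloc(); the sizes,
+ * callback and client data below are hypothetical placeholders, not
+ * values used elsewhere in this driver:
+ *
+ *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
+ *	struct vmci_queue *produce_q, *consume_q;
+ *	int rv = vmci_qp_alloc(&handle, &produce_q, PAGE_SIZE,
+ *			       &consume_q, PAGE_SIZE, VMCI_INVALID_ID,
+ *			       0, VMCI_NO_PRIVILEGE_FLAGS, false,
+ *			       my_wakeup_cb, my_client_data);
+ */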
+
+/*
+ * This function implements the host kernel API for detaching from
+ * a queue pair.
+ */
+static int qp_detatch_host_work(struct vmci_handle handle)
+{
+	int result;
+	struct vmci_ctx *context;
+
+	context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
+
+	result = vmci_qp_broker_detach(handle, context);
+
+	vmci_ctx_put(context);
+	return result;
+}
+
+/*
+ * Detaches from a VMCI queue_pair. Only checks validity of input argument.
+ * Real work is done in the host or guest specific function.
+ */
+static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
+{
+	if (vmci_handle_is_invalid(handle))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	if (guest_endpoint)
+		return qp_detatch_guest_work(handle);
+	else
+		return qp_detatch_host_work(handle);
+}
+
+/*
+ * Returns the entry from the head of the list. Assumes that the list is
+ * locked.
+ */
+static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
+{
+	if (!list_empty(&qp_list->head)) {
+		struct qp_entry *entry =
+		    list_first_entry(&qp_list->head, struct qp_entry,
+				     list_item);
+		return entry;
+	}
+
+	return NULL;
+}
+
+void vmci_qp_broker_exit(void)
+{
+	struct qp_entry *entry;
+	struct qp_broker_entry *be;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	while ((entry = qp_list_get_head(&qp_broker_list))) {
+		be = (struct qp_broker_entry *)entry;
+
+		qp_list_remove_entry(&qp_broker_list, entry);
+		kfree(be);
+	}
+
+	mutex_unlock(&qp_broker_list.mutex);
+}
+
+/*
+ * Requests that a queue pair be allocated with the VMCI queue
+ * pair broker. Allocates a queue pair entry if one does not
+ * exist. Attaches to one if it exists, and retrieves the page
+ * files backing that queue_pair.  Assumes that the queue pair
+ * broker lock is held.
+ */
+int vmci_qp_broker_alloc(struct vmci_handle handle,
+			 u32 peer,
+			 u32 flags,
+			 u32 priv_flags,
+			 u64 produce_size,
+			 u64 consume_size,
+			 struct vmci_qp_page_store *page_store,
+			 struct vmci_ctx *context)
+{
+	return qp_broker_alloc(handle, peer, flags, priv_flags,
+			       produce_size, consume_size,
+			       page_store, context, NULL, NULL, NULL, NULL);
+}
+
+/*
+ * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
+ * step to add the UVAs of the VMX mapping of the queue pair. This function
+ * provides backwards compatibility with such VMX'en, and takes care of
+ * registering the page store for a queue pair previously allocated by the
+ * VMX during create or attach. This function will move the queue pair state
+ * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
+ * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
+ * attached state with memory, the queue pair is ready to be used by the
+ * host peer, and an attached event will be generated.
+ *
+ * Assumes that the queue pair broker lock is held.
+ *
+ * This function is only used by the hosted platform, since there is no
+ * issue with backwards compatibility for vmkernel.
+ */
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+				  u64 produce_uva,
+				  u64 consume_uva,
+				  struct vmci_ctx *context)
+{
+	struct qp_broker_entry *entry;
+	int result;
+	const u32 context_id = vmci_ctx_get_id(context);
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/*
+	 * We only support guest to host queue pairs, so the VMX must
+	 * supply UVAs for the mapped page files.
+	 */
+
+	if (produce_uva == 0 || consume_uva == 0)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!vmci_ctx_qp_exists(context, handle)) {
+		pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+			context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	entry = qp_broker_handle_to_entry(handle);
+	if (!entry) {
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	/*
+	 * If I'm the owner then I can set the page store.
+	 *
+	 * Or, if a host created the queue_pair and I'm the attached peer
+	 * then I can set the page store.
+	 */
+	if (entry->create_id != context_id &&
+	    (entry->create_id != VMCI_HOST_CONTEXT_ID ||
+	     entry->attach_id != context_id)) {
+		result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
+		goto out;
+	}
+
+	if (entry->state != VMCIQPB_CREATED_NO_MEM &&
+	    entry->state != VMCIQPB_ATTACHED_NO_MEM) {
+		result = VMCI_ERROR_UNAVAILABLE;
+		goto out;
+	}
+
+	result = qp_host_get_user_memory(produce_uva, consume_uva,
+					 entry->produce_q, entry->consume_q);
+	if (result < VMCI_SUCCESS)
+		goto out;
+
+	result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+	if (result < VMCI_SUCCESS) {
+		qp_host_unregister_user_memory(entry->produce_q,
+					       entry->consume_q);
+		goto out;
+	}
+
+	if (entry->state == VMCIQPB_CREATED_NO_MEM)
+		entry->state = VMCIQPB_CREATED_MEM;
+	else
+		entry->state = VMCIQPB_ATTACHED_MEM;
+
+	entry->vmci_page_files = true;
+
+	if (entry->state == VMCIQPB_ATTACHED_MEM) {
+		result =
+		    qp_notify_peer(true, handle, context_id, entry->create_id);
+		if (result < VMCI_SUCCESS) {
+			pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
+				entry->create_id, entry->qp.handle.context,
+				entry->qp.handle.resource);
+		}
+	}
+
+	result = VMCI_SUCCESS;
+ out:
+	mutex_unlock(&qp_broker_list.mutex);
+	return result;
+}
+
+/*
+ * Resets saved queue headers for the given QP broker
+ * entry. Should be used when guest memory becomes available
+ * again, or the guest detaches.
+ */
+static void qp_reset_saved_headers(struct qp_broker_entry *entry)
+{
+	entry->produce_q->saved_header = NULL;
+	entry->consume_q->saved_header = NULL;
+}
+
+/*
+ * The main entry point for detaching from a queue pair registered with the
+ * queue pair broker. If more than one endpoint is attached to the queue
+ * pair, the first endpoint will mainly decrement a reference count and
+ * generate a notification to its peer. The last endpoint will clean up
+ * the queue pair state registered with the broker.
+ *
+ * When a guest endpoint detaches, it will unmap and unregister the guest
+ * memory backing the queue pair. If the host is still attached, it will
+ * no longer be able to access the queue pair content.
+ *
+ * If the queue pair is already in a state where there is no memory
+ * registered for the queue pair (any *_NO_MEM state), it will transition to
+ * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest
+ * endpoint is the first of two endpoints to detach. If the host endpoint is
+ * the first out of two to detach, the queue pair will move to the
+ * VMCIQPB_SHUTDOWN_MEM state.
+ */
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
+{
+	struct qp_broker_entry *entry;
+	const u32 context_id = vmci_ctx_get_id(context);
+	u32 peer_id;
+	bool is_local = false;
+	int result;
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID) {
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!vmci_ctx_qp_exists(context, handle)) {
+		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	entry = qp_broker_handle_to_entry(handle);
+	if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	if (context_id != entry->create_id && context_id != entry->attach_id) {
+		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+		goto out;
+	}
+
+	if (context_id == entry->create_id) {
+		peer_id = entry->attach_id;
+		entry->create_id = VMCI_INVALID_ID;
+	} else {
+		peer_id = entry->create_id;
+		entry->attach_id = VMCI_INVALID_ID;
+	}
+	entry->qp.ref_count--;
+
+	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		bool headers_mapped;
+
+		/*
+		 * Pre NOVMVM vmx'en may detach from a queue pair
+		 * before setting the page store, and in that case
+		 * there is no user memory to detach from. Also, more
+		 * recent VMX'en may detach from a queue pair in the
+		 * quiesced state.
+		 */
+
+		qp_acquire_queue_mutex(entry->produce_q);
+		headers_mapped = entry->produce_q->q_header ||
+		    entry->consume_q->q_header;
+		if (QPBROKERSTATE_HAS_MEM(entry)) {
+			result =
+			    qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
+						 entry->produce_q,
+						 entry->consume_q);
+			if (result < VMCI_SUCCESS)
+				pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+					handle.context, handle.resource,
+					result);
+
+			qp_host_unregister_user_memory(entry->produce_q,
+						       entry->consume_q);
+
+		}
+
+		if (!headers_mapped)
+			qp_reset_saved_headers(entry);
+
+		qp_release_queue_mutex(entry->produce_q);
+
+		if (!headers_mapped && entry->wakeup_cb)
+			entry->wakeup_cb(entry->client_data);
+
+	} else {
+		if (entry->wakeup_cb) {
+			entry->wakeup_cb = NULL;
+			entry->client_data = NULL;
+		}
+	}
+
+	if (entry->qp.ref_count == 0) {
+		qp_list_remove_entry(&qp_broker_list, &entry->qp);
+
+		if (is_local)
+			kfree(entry->local_mem);
+
+		qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
+		qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
+		qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
+		/* Unlink from resource hash table and free callback */
+		vmci_resource_remove(&entry->resource);
+
+		kfree(entry);
+
+		vmci_ctx_qp_destroy(context, handle);
+	} else {
+		qp_notify_peer(false, handle, context_id, peer_id);
+		if (context_id == VMCI_HOST_CONTEXT_ID &&
+		    QPBROKERSTATE_HAS_MEM(entry)) {
+			entry->state = VMCIQPB_SHUTDOWN_MEM;
+		} else {
+			entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
+		}
+
+		if (!is_local)
+			vmci_ctx_qp_destroy(context, handle);
+
+	}
+	result = VMCI_SUCCESS;
+ out:
+	mutex_unlock(&qp_broker_list.mutex);
+	return result;
+}
+
+/*
+ * Establishes the necessary mappings for a queue pair given a
+ * reference to the queue pair guest memory. This is usually
+ * called when a guest is unquiesced and the VMX is allowed to
+ * map guest memory once again.
+ */
+int vmci_qp_broker_map(struct vmci_handle handle,
+		       struct vmci_ctx *context,
+		       u64 guest_mem)
+{
+	struct qp_broker_entry *entry;
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool is_local = false;
+	int result;
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!vmci_ctx_qp_exists(context, handle)) {
+		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	entry = qp_broker_handle_to_entry(handle);
+	if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	if (context_id != entry->create_id && context_id != entry->attach_id) {
+		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+		goto out;
+	}
+
+	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+	result = VMCI_SUCCESS;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		struct vmci_qp_page_store page_store;
+
+		page_store.pages = guest_mem;
+		page_store.len = QPE_NUM_PAGES(entry->qp);
+
+		qp_acquire_queue_mutex(entry->produce_q);
+		qp_reset_saved_headers(entry);
+		result =
+		    qp_host_register_user_memory(&page_store,
+						 entry->produce_q,
+						 entry->consume_q);
+		qp_release_queue_mutex(entry->produce_q);
+		if (result == VMCI_SUCCESS) {
+			/* Move state from *_NO_MEM to *_MEM */
+
+			entry->state++;
+
+			if (entry->wakeup_cb)
+				entry->wakeup_cb(entry->client_data);
+		}
+	}
+
+ out:
+	mutex_unlock(&qp_broker_list.mutex);
+	return result;
+}
+
+/*
+ * Saves a snapshot of the queue headers for the given QP broker
+ * entry. Should be used when guest memory is unmapped.
+ * Results:
+ * VMCI_SUCCESS on success, appropriate error code if guest memory
+ * can't be accessed.
+ */
+static int qp_save_headers(struct qp_broker_entry *entry)
+{
+	int result;
+
+	if (entry->produce_q->saved_header != NULL &&
+	    entry->consume_q->saved_header != NULL) {
+		/*
+		 *  If the headers have already been saved, we don't need to do
+		 *  it again, and we don't want to map in the headers
+		 *  unnecessarily.
+		 */
+
+		return VMCI_SUCCESS;
+	}
+
+	if (NULL == entry->produce_q->q_header ||
+	    NULL == entry->consume_q->q_header) {
+		result = qp_host_map_queues(entry->produce_q, entry->consume_q);
+		if (result < VMCI_SUCCESS)
+			return result;
+	}
+
+	memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
+	       sizeof(entry->saved_produce_q));
+	entry->produce_q->saved_header = &entry->saved_produce_q;
+	memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
+	       sizeof(entry->saved_consume_q));
+	entry->consume_q->saved_header = &entry->saved_consume_q;
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Removes all references to the guest memory of a given queue pair, and
+ * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
+ * called when a VM is being quiesced and access to guest memory should
+ * be avoided.
+ */
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+			 struct vmci_ctx *context,
+			 u32 gid)
+{
+	struct qp_broker_entry *entry;
+	const u32 context_id = vmci_ctx_get_id(context);
+	bool is_local = false;
+	int result;
+
+	if (vmci_handle_is_invalid(handle) || !context ||
+	    context_id == VMCI_INVALID_ID)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	mutex_lock(&qp_broker_list.mutex);
+
+	if (!vmci_ctx_qp_exists(context, handle)) {
+		pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	entry = qp_broker_handle_to_entry(handle);
+	if (!entry) {
+		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
+			 context_id, handle.context, handle.resource);
+		result = VMCI_ERROR_NOT_FOUND;
+		goto out;
+	}
+
+	if (context_id != entry->create_id && context_id != entry->attach_id) {
+		result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+		goto out;
+	}
+
+	is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
+
+	if (context_id != VMCI_HOST_CONTEXT_ID) {
+		qp_acquire_queue_mutex(entry->produce_q);
+		result = qp_save_headers(entry);
+		if (result < VMCI_SUCCESS)
+			pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
+				handle.context, handle.resource, result);
+
+		qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
+
+		/*
+		 * On hosted, when we unmap queue pairs, the VMX will also
+		 * unmap the guest memory, so we invalidate the previously
+		 * registered memory. If the queue pair is mapped again at a
+		 * later point in time, we will need to reregister the user
+		 * memory with a possibly new user VA.
+		 */
+		qp_host_unregister_user_memory(entry->produce_q,
+					       entry->consume_q);
+
+		/*
+		 * Move state from *_MEM to *_NO_MEM.
+		 */
+		entry->state--;
+
+		qp_release_queue_mutex(entry->produce_q);
+	}
+
+	result = VMCI_SUCCESS;
+
+ out:
+	mutex_unlock(&qp_broker_list.mutex);
+	return result;
+}
+
+/*
+ * Destroys all guest queue pair endpoints. If active guest queue
+ * pairs still exist, hypercalls to attempt detach from these
+ * queue pairs will be made. Any failure to detach is silently
+ * ignored.
+ */
+void vmci_qp_guest_endpoints_exit(void)
+{
+	struct qp_entry *entry;
+	struct qp_guest_endpoint *ep;
+
+	mutex_lock(&qp_guest_endpoints.mutex);
+
+	while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
+		ep = (struct qp_guest_endpoint *)entry;
+
+		/* Don't make a hypercall for local queue_pairs. */
+		if (!(entry->flags & VMCI_QPFLAG_LOCAL))
+			qp_detatch_hypercall(entry->handle);
+
+		/* We cannot fail the exit, so let's reset ref_count. */
+		entry->ref_count = 0;
+		qp_list_remove_entry(&qp_guest_endpoints, entry);
+
+		qp_guest_endpoint_destroy(ep);
+	}
+
+	mutex_unlock(&qp_guest_endpoints.mutex);
+}
+
+/*
+ * Helper routine that will lock the queue pair before subsequent
+ * operations.
+ * Note: Non-blocking on the host side is currently only implemented in ESX.
+ * Since non-blocking isn't yet implemented on the host personality, we
+ * have no reason to acquire a spin lock.  So to avoid the use of an
+ * unnecessary lock, only acquire the mutex if we can block.
+ * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK.  Therefore
+ * we can use the same locking function for access to both the queue
+ * and the queue headers as it is the same logic.  Assert this behavior.
+ */
+static void qp_lock(const struct vmci_qp *qpair)
+{
+	if (vmci_can_block(qpair->flags))
+		qp_acquire_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * Helper routine that unlocks the queue pair after calling
+ * qp_lock.  Respects non-blocking and pinning flags.
+ */
+static void qp_unlock(const struct vmci_qp *qpair)
+{
+	if (vmci_can_block(qpair->flags))
+		qp_release_queue_mutex(qpair->produce_q);
+}
+
+/*
+ * The queue headers may not be mapped at all times. If a queue is
+ * currently not mapped, an attempt is made to map it in.
+ */
+static int qp_map_queue_headers(struct vmci_queue *produce_q,
+				struct vmci_queue *consume_q,
+				bool can_block)
+{
+	int result;
+
+	if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
+		if (can_block)
+			result = qp_host_map_queues(produce_q, consume_q);
+		else
+			result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
+
+		if (result < VMCI_SUCCESS)
+			return (produce_q->saved_header &&
+				consume_q->saved_header) ?
+			    VMCI_ERROR_QUEUEPAIR_NOT_READY :
+			    VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
+	}
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Helper routine that will retrieve the produce and consume
+ * headers of a given queue pair. If the guest memory of the
+ * queue pair is currently not available, the saved queue headers
+ * will be returned, if these are available.
+ */
+static int qp_get_queue_headers(const struct vmci_qp *qpair,
+				struct vmci_queue_header **produce_q_header,
+				struct vmci_queue_header **consume_q_header)
+{
+	int result;
+
+	result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
+				      vmci_can_block(qpair->flags));
+	if (result == VMCI_SUCCESS) {
+		*produce_q_header = qpair->produce_q->q_header;
+		*consume_q_header = qpair->consume_q->q_header;
+	} else if (qpair->produce_q->saved_header &&
+		   qpair->consume_q->saved_header) {
+		*produce_q_header = qpair->produce_q->saved_header;
+		*consume_q_header = qpair->consume_q->saved_header;
+		result = VMCI_SUCCESS;
+	}
+
+	return result;
+}
+
+/*
+ * Callback from VMCI queue pair broker indicating that a queue
+ * pair that was previously not ready, now either is ready or
+ * gone forever.
+ */
+static int qp_wakeup_cb(void *client_data)
+{
+	struct vmci_qp *qpair = (struct vmci_qp *)client_data;
+
+	qp_lock(qpair);
+	while (qpair->blocked > 0) {
+		qpair->blocked--;
+		qpair->generation++;
+		wake_up(&qpair->event);
+	}
+	qp_unlock(qpair);
+
+	return VMCI_SUCCESS;
+}
+
+/*
+ * Makes the calling thread wait for the queue pair to become
+ * ready for host side access.  Returns true when thread is
+ * woken up after queue pair state change, false otherwise.
+ */
+static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
+{
+	unsigned int generation;
+
+	if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
+		return false;
+
+	qpair->blocked++;
+	generation = qpair->generation;
+	qp_unlock(qpair);
+	wait_event(qpair->event, generation != qpair->generation);
+	qp_lock(qpair);
+
+	return true;
+}
+
+/*
+ * Enqueues a given buffer to the produce queue using the provided
+ * function. As many bytes as possible (space available in the queue)
+ * are enqueued.  Assumes the queue->mutex has been acquired.  Returns
+ * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
+ * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
+ * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
+ * an error occurred when accessing the buffer,
+ * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
+ * available.  Otherwise, the number of bytes written to the queue is
+ * returned.  Updates the tail pointer of the produce queue.
+ */
+static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
+				 struct vmci_queue *consume_q,
+				 const u64 produce_q_size,
+				 const void *buf,
+				 size_t buf_size,
+				 vmci_memcpy_to_queue_func memcpy_to_queue,
+				 bool can_block)
+{
+	s64 free_space;
+	u64 tail;
+	size_t written;
+	ssize_t result;
+
+	result = qp_map_queue_headers(produce_q, consume_q, can_block);
+	if (unlikely(result != VMCI_SUCCESS))
+		return result;
+
+	free_space = vmci_q_header_free_space(produce_q->q_header,
+					      consume_q->q_header,
+					      produce_q_size);
+	if (free_space == 0)
+		return VMCI_ERROR_QUEUEPAIR_NOSPACE;
+
+	if (free_space < VMCI_SUCCESS)
+		return (ssize_t) free_space;
+
+	written = (size_t) (free_space > buf_size ? buf_size : free_space);
+	tail = vmci_q_header_producer_tail(produce_q->q_header);
+	if (likely(tail + written < produce_q_size)) {
+		result = memcpy_to_queue(produce_q, tail, buf, 0, written);
+	} else {
+		/* Tail pointer wraps around. */
+
+		const size_t tmp = (size_t) (produce_q_size - tail);
+
+		result = memcpy_to_queue(produce_q, tail, buf, 0, tmp);
+		if (result >= VMCI_SUCCESS)
+			result = memcpy_to_queue(produce_q, 0, buf, tmp,
+						 written - tmp);
+	}
+
+	if (result < VMCI_SUCCESS)
+		return result;
+
+	vmci_q_header_add_producer_tail(produce_q->q_header, written,
+					produce_q_size);
+	return written;
+}
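To make the wrap-around branch above concrete, here is a small standalone sketch of the same arithmetic (not driver code); the 16-byte queue size, tail offset 12 and 7-byte write are made-up values:

/* Standalone illustration of the split copy in qp_enqueue_locked(). */
#include <stdio.h>

int main(void)
{
	unsigned long long produce_q_size = 16;  /* illustrative queue size */
	unsigned long long tail = 12;            /* current producer tail   */
	unsigned long long written = 7;          /* bytes accepted to write */

	if (tail + written < produce_q_size) {
		printf("single copy: %llu bytes at offset %llu\n",
		       written, tail);
	} else {
		/* First chunk runs to the end of the queue ... */
		unsigned long long tmp = produce_q_size - tail;  /* 4 bytes */

		/* ... the remainder wraps around to offset 0. */
		printf("split copy: %llu bytes at offset %llu, then %llu at 0\n",
		       tmp, tail, written - tmp);
	}
	return 0;
}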
+
+/*
+ * Dequeues data (if available) from the given consume queue. Writes data
+ * to the user provided buffer using the provided function.
+ * Assumes the queue->mutex has been acquired.
+ * Results:
+ * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
+ * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
+ * (as defined by the queue size).
+ * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
+ * Otherwise the number of bytes dequeued is returned.
+ * Side effects:
+ * Updates the head pointer of the consume queue.
+ */
+static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
+				 struct vmci_queue *consume_q,
+				 const u64 consume_q_size,
+				 void *buf,
+				 size_t buf_size,
+				 vmci_memcpy_from_queue_func memcpy_from_queue,
+				 bool update_consumer,
+				 bool can_block)
+{
+	s64 buf_ready;
+	u64 head;
+	size_t read;
+	ssize_t result;
+
+	result = qp_map_queue_headers(produce_q, consume_q, can_block);
+	if (unlikely(result != VMCI_SUCCESS))
+		return result;
+
+	buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
+					    produce_q->q_header,
+					    consume_q_size);
+	if (buf_ready == 0)
+		return VMCI_ERROR_QUEUEPAIR_NODATA;
+
+	if (buf_ready < VMCI_SUCCESS)
+		return (ssize_t) buf_ready;
+
+	read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
+	head = vmci_q_header_consumer_head(produce_q->q_header);
+	if (likely(head + read < consume_q_size)) {
+		result = memcpy_from_queue(buf, 0, consume_q, head, read);
+	} else {
+		/* Head pointer wraps around. */
+
+		const size_t tmp = (size_t) (consume_q_size - head);
+
+		result = memcpy_from_queue(buf, 0, consume_q, head, tmp);
+		if (result >= VMCI_SUCCESS)
+			result = memcpy_from_queue(buf, tmp, consume_q, 0,
+						   read - tmp);
+
+	}
+
+	if (result < VMCI_SUCCESS)
+		return result;
+
+	if (update_consumer)
+		vmci_q_header_add_consumer_head(produce_q->q_header,
+						read, consume_q_size);
+
+	return read;
+}
+
+/*
+ * vmci_qpair_alloc() - Allocates a queue pair.
+ * @qpair:      Pointer for the new vmci_qp struct.
+ * @handle:     Handle to track the resource.
+ * @produce_qsize:      Desired size of the producer queue.
+ * @consume_qsize:      Desired size of the consumer queue.
+ * @peer:       ContextID of the peer.
+ * @flags:      VMCI flags.
+ * @priv_flags: VMCI privilege flags.
+ *
+ * This is the client interface for allocating the memory for a
+ * vmci_qp structure and then attaching to the underlying
+ * queue.  If an error occurs allocating the memory for the
+ * vmci_qp structure no attempt is made to attach.  If an
+ * error occurs attaching, then the structure is freed.
+ */
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+		     struct vmci_handle *handle,
+		     u64 produce_qsize,
+		     u64 consume_qsize,
+		     u32 peer,
+		     u32 flags,
+		     u32 priv_flags)
+{
+	struct vmci_qp *my_qpair;
+	int retval;
+	struct vmci_handle src = VMCI_INVALID_HANDLE;
+	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
+	enum vmci_route route;
+	vmci_event_release_cb wakeup_cb;
+	void *client_data;
+
+	/*
+	 * Restrict the size of a queuepair.  The device already
+	 * enforces a limit on the total amount of memory that can be
+	 * allocated to queuepairs for a guest.  However, we try to
+	 * allocate this memory before we make the queuepair
+	 * allocation hypercall.  On Linux, we allocate each page
+	 * separately, which means rather than fail, the guest will
+	 * thrash while it tries to allocate, and will become
+	 * increasingly unresponsive to the point where it appears to
+	 * be hung.  So we place a limit on the size of an individual
+	 * queuepair here, and leave the device to enforce the
+	 * restriction on total queuepair memory.  (Note that this
+	 * doesn't prevent all cases; a user with only this much
+	 * physical memory could still get into trouble.)  The error
+	 * used by the device is NO_RESOURCES, so use that here too.
+	 */
+
+	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
+	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+		return VMCI_ERROR_NO_RESOURCES;
+
+	retval = vmci_route(&src, &dst, false, &route);
+	if (retval < VMCI_SUCCESS)
+		route = vmci_guest_code_active() ?
+		    VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
+
+	/* If NONBLOCK or PINNED is set, we better be the guest personality. */
+	if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
+	    VMCI_ROUTE_AS_GUEST != route) {
+		pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+		return VMCI_ERROR_INVALID_ARGS;
+	}
+
+	/*
+	 * Limit the size of pinned QPs and check sanity.
+	 *
+	 * Pinned pages implies non-blocking mode.  Mutexes aren't acquired
+	 * when the NONBLOCK flag is set in qpair code, and likewise should not be
+	 * acquired when the PINNED flag is set.  Since pinning pages
+	 * implies we want speed, it makes no sense not to have NONBLOCK
+	 * set if PINNED is set.  Hence enforce this implication.
+	 */
+	if (vmci_qp_pinned(flags)) {
+		if (vmci_can_block(flags)) {
+			pr_err("Attempted to enable pinning w/o non-blocking");
+			return VMCI_ERROR_INVALID_ARGS;
+		}
+
+		if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
+			return VMCI_ERROR_NO_RESOURCES;
+	}
+
+	my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
+	if (!my_qpair)
+		return VMCI_ERROR_NO_MEM;
+
+	my_qpair->produce_q_size = produce_qsize;
+	my_qpair->consume_q_size = consume_qsize;
+	my_qpair->peer = peer;
+	my_qpair->flags = flags;
+	my_qpair->priv_flags = priv_flags;
+
+	wakeup_cb = NULL;
+	client_data = NULL;
+
+	if (VMCI_ROUTE_AS_HOST == route) {
+		my_qpair->guest_endpoint = false;
+		if (!(flags & VMCI_QPFLAG_LOCAL)) {
+			my_qpair->blocked = 0;
+			my_qpair->generation = 0;
+			init_waitqueue_head(&my_qpair->event);
+			wakeup_cb = qp_wakeup_cb;
+			client_data = (void *)my_qpair;
+		}
+	} else {
+		my_qpair->guest_endpoint = true;
+	}
+
+	retval = vmci_qp_alloc(handle,
+			       &my_qpair->produce_q,
+			       my_qpair->produce_q_size,
+			       &my_qpair->consume_q,
+			       my_qpair->consume_q_size,
+			       my_qpair->peer,
+			       my_qpair->flags,
+			       my_qpair->priv_flags,
+			       my_qpair->guest_endpoint,
+			       wakeup_cb, client_data);
+
+	if (retval < VMCI_SUCCESS) {
+		kfree(my_qpair);
+		return retval;
+	}
+
+	*qpair = my_qpair;
+	my_qpair->handle = *handle;
+
+	return retval;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
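As a rough usage sketch (not part of this patch), a kernel client could attach and later detach with this interface as shown below; the peer ID, the 64 KiB queue sizes and the zero privilege flags are placeholder values, and the prototypes are assumed to be exposed through linux/vmw_vmci_api.h:

/* Illustrative only: allocate a queue pair to a peer and tear it down. */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static struct vmci_qp *example_qpair;
static struct vmci_handle example_handle = VMCI_INVALID_HANDLE;

static int example_attach(u32 peer_cid)
{
	int rc;

	rc = vmci_qpair_alloc(&example_qpair, &example_handle,
			      64 * 1024,   /* produce queue size (placeholder) */
			      64 * 1024,   /* consume queue size (placeholder) */
			      peer_cid,
			      0,           /* flags: blocking, not pinned */
			      0);          /* priv_flags: none (placeholder) */
	return rc < VMCI_SUCCESS ? rc : VMCI_SUCCESS;
}

static void example_detach(void)
{
	/* Also frees the vmci_qp struct, see vmci_qpair_detach() below. */
	vmci_qpair_detach(&example_qpair);
}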
+
+/*
+ * vmci_qpair_detach() - Detaches the client from a queue pair.
+ * @qpair:      Reference to a pointer to the qpair struct.
+ *
+ * This is the client interface for detaching from a VMCIQPair.
+ * Note that this routine will free the memory allocated for the
+ * vmci_qp structure too.
+ */
+int vmci_qpair_detach(struct vmci_qp **qpair)
+{
+	int result;
+	struct vmci_qp *old_qpair;
+
+	if (!qpair || !(*qpair))
+		return VMCI_ERROR_INVALID_ARGS;
+
+	old_qpair = *qpair;
+	result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
+
+	/*
+	 * The guest can fail to detach for a number of reasons, and
+	 * if it does so, it will clean up the entry (if there is one).
+	 * The host can fail too, but it won't clean up the entry
+	 * immediately; it will do that later when the context is
+	 * freed.  Either way, we need to release the qpair struct
+	 * here; there isn't much the caller can do, and we don't want
+	 * to leak.
+	 */
+
+	memset(old_qpair, 0, sizeof(*old_qpair));
+	old_qpair->handle = VMCI_INVALID_HANDLE;
+	old_qpair->peer = VMCI_INVALID_ID;
+	kfree(old_qpair);
+	*qpair = NULL;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_detach);
+
+/*
+ * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
+ * @qpair:      Pointer to the queue pair struct.
+ * @producer_tail:      Reference used for storing producer tail index.
+ * @consumer_head:      Reference used for storing the consumer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the producer.
+ */
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+				   u64 *producer_tail,
+				   u64 *consumer_head)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	int result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		vmci_q_header_get_pointers(produce_q_header, consume_q_header,
+					   producer_tail, consumer_head);
+	qp_unlock(qpair);
+
+	if (result == VMCI_SUCCESS &&
+	    ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
+	     (consumer_head && *consumer_head >= qpair->produce_q_size)))
+		return VMCI_ERROR_INVALID_SIZE;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
+
+/*
+ * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
+ * @qpair:      Pointer to the queue pair struct.
+ * @consumer_tail:      Reference used for storing consumer tail index.
+ * @producer_head:      Reference used for storing the producer head index.
+ *
+ * This is the client interface for getting the current indexes of the
+ * QPair from the point of view of the caller as the consumer.
+ */
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+				   u64 *consumer_tail,
+				   u64 *producer_head)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	int result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		vmci_q_header_get_pointers(consume_q_header, produce_q_header,
+					   consumer_tail, producer_head);
+	qp_unlock(qpair);
+
+	if (result == VMCI_SUCCESS &&
+	    ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
+	     (producer_head && *producer_head >= qpair->consume_q_size)))
+		return VMCI_ERROR_INVALID_SIZE;
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
+
+/*
+ * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the producer, which is the common case.  Returns < 0 on error,
+ * otherwise the number of bytes into which data can be enqueued.
+ */
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	s64 result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		result = vmci_q_header_free_space(produce_q_header,
+						  consume_q_header,
+						  qpair->produce_q_size);
+	else
+		result = 0;
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
+
+/*
+ * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of free
+ * space in the QPair from the point of view of the caller as
+ * the consumer, which is not the common case.  Returns < 0 on error,
+ * otherwise the number of bytes into which data can be enqueued.
+ */
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	s64 result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		result = vmci_q_header_free_space(consume_q_header,
+						  produce_q_header,
+						  qpair->consume_q_size);
+	else
+		result = 0;
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
+
+/*
+ * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
+ * producer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the producer, which is not the common case.  Returns < 0 on
+ * error, otherwise the number of bytes that may be read.
+ */
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	s64 result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		result = vmci_q_header_buf_ready(produce_q_header,
+						 consume_q_header,
+						 qpair->produce_q_size);
+	else
+		result = 0;
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
+
+/*
+ * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
+ * consumer queue.
+ * @qpair:      Pointer to the queue pair struct.
+ *
+ * This is the client interface for getting the amount of
+ * enqueued data in the QPair from the point of view of the
+ * caller as the consumer, which is the normal case.  Returns < 0 on
+ * error, otherwise the number of bytes that may be read.
+ */
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
+{
+	struct vmci_queue_header *produce_q_header;
+	struct vmci_queue_header *consume_q_header;
+	s64 result;
+
+	if (!qpair)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+	result =
+	    qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
+	if (result == VMCI_SUCCESS)
+		result = vmci_q_header_buf_ready(consume_q_header,
+						 produce_q_header,
+						 qpair->consume_q_size);
+	else
+		result = 0;
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
+
+/*
+ * vmci_qpair_enqueue() - Throw data on the queue.
+ * @qpair:      Pointer to the queue pair struct.
+ * @buf:        Pointer to buffer containing data
+ * @buf_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * Returns number of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+			   const void *buf,
+			   size_t buf_size,
+			   int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair || !buf)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_enqueue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->produce_q_size,
+					   buf, buf_size,
+					   qp_memcpy_to_queue,
+					   vmci_can_block(qpair->flags));
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
+
+/*
+ * vmci_qpair_dequeue() - Get data from the queue.
+ * @qpair:      Pointer to the queue pair struct.
+ * @buf:        Pointer to buffer for the data
+ * @buf_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+			   void *buf,
+			   size_t buf_size,
+			   int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair || !buf)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_dequeue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->consume_q_size,
+					   buf, buf_size,
+					   qp_memcpy_from_queue, true,
+					   vmci_can_block(qpair->flags));
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
+
+/*
+ * vmci_qpair_peek() - Peek at the data in the queue.
+ * @qpair:      Pointer to the queue pair struct.
+ * @buf:        Pointer to buffer for the data
+ * @buf_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue.  (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * Returns number of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
+			void *buf,
+			size_t buf_size,
+			int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair || !buf)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_dequeue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->consume_q_size,
+					   buf, buf_size,
+					   qp_memcpy_from_queue, false,
+					   vmci_can_block(qpair->flags));
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peek);
+
+/*
+ * vmci_qpair_enquev() - Throw data on the queue using iov.
+ * @qpair:      Pointer to the queue pair struct.
+ * @iov:        Pointer to buffer containing data
+ * @iov_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for enqueueing data into the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes enqueued or < 0 on error.
+ */
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+			  void *iov,
+			  size_t iov_size,
+			  int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair || !iov)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_enqueue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->produce_q_size,
+					   iov, iov_size,
+					   qp_memcpy_to_queue_iov,
+					   vmci_can_block(qpair->flags));
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
+
+/*
+ * vmci_qpair_dequev() - Get data from the queue using iov.
+ * @qpair:      Pointer to the queue pair struct.
+ * @iov:        Pointer to buffer for the data
+ * @iov_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused).
+ *
+ * This is the client interface for dequeueing data from the queue.
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes dequeued or < 0 on error.
+ */
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+			  void *iov,
+			  size_t iov_size,
+			  int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair || !iov)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_dequeue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->consume_q_size,
+					   iov, iov_size,
+					   qp_memcpy_from_queue_iov,
+					   true, vmci_can_block(qpair->flags));
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
+
+/*
+ * vmci_qpair_peekv() - Peek at the data in the queue using iov.
+ * @qpair:      Pointer to the queue pair struct.
+ * @iov:        Pointer to buffer for the data
+ * @iov_size:   Length of buffer.
+ * @buf_type:   Buffer type (Unused on Linux).
+ *
+ * This is the client interface for peeking into a queue.  (I.e.,
+ * copy data from the queue without updating the head pointer.)
+ * This function uses IO vectors to handle the work. Returns number
+ * of bytes peeked or < 0 on error.
+ */
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
+			 void *iov,
+			 size_t iov_size,
+			 int buf_type)
+{
+	ssize_t result;
+
+	if (!qpair || !iov)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	qp_lock(qpair);
+
+	do {
+		result = qp_dequeue_locked(qpair->produce_q,
+					   qpair->consume_q,
+					   qpair->consume_q_size,
+					   iov, iov_size,
+					   qp_memcpy_from_queue_iov,
+					   false, vmci_can_block(qpair->flags));
+
+		if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
+		    !qp_wait_for_ready_queue(qpair))
+			result = VMCI_ERROR_WOULD_BLOCK;
+
+	} while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
+
+	qp_unlock(qpair);
+	return result;
+}
+EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
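A minimal data-path sketch using the exported calls above; the buffer contents and sizes are arbitrary placeholders, and the sketch is illustrative rather than part of the patch:

/* Illustrative only: push a buffer to the peer and read the reply. */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static int example_ping(struct vmci_qp *qpair)
{
	char out[64] = "hello";
	char in[64];
	ssize_t n;

	/* buf_type is unused by this implementation, so pass 0. */
	n = vmci_qpair_enqueue(qpair, out, sizeof(out), 0);
	if (n < VMCI_SUCCESS)
		return (int)n;

	/* Non-destructive look at the consume queue first ... */
	n = vmci_qpair_peek(qpair, in, sizeof(in), 0);
	if (n < VMCI_SUCCESS)
		return (int)n;

	/* ... then actually consume it, advancing the head pointer. */
	n = vmci_qpair_dequeue(qpair, in, sizeof(in), 0);
	return n < VMCI_SUCCESS ? (int)n : VMCI_SUCCESS;
}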
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
new file mode 100644
index 0000000..58c6959
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -0,0 +1,191 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_QUEUE_PAIR_H_
+#define _VMCI_QUEUE_PAIR_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_event_release_cb) (void *client_data);
+
+/* Guest device port I/O. */
+struct ppn_set {
+	u64 num_produce_pages;
+	u64 num_consume_pages;
+	u32 *produce_ppns;
+	u32 *consume_ppns;
+	bool initialized;
+};
+
+/* VMCIqueue_pairAllocInfo */
+struct vmci_qp_alloc_info {
+	struct vmci_handle handle;
+	u32 peer;
+	u32 flags;
+	u64 produce_size;
+	u64 consume_size;
+	u64 ppn_va;	/* Start VA of queue pair PPNs. */
+	u64 num_ppns;
+	s32 result;
+	u32 version;
+};
+
+/* VMCIqueue_pairSetVAInfo */
+struct vmci_qp_set_va_info {
+	struct vmci_handle handle;
+	u64 va;		/* Start VA of queue pair PPNs. */
+	u64 num_ppns;
+	u32 version;
+	s32 result;
+};
+
+/*
+ * For backwards compatibility, here is a version of the
+ * VMCIqueue_pairPageFileInfo before support for host end points was added.
+ * Note that the current version of that structure requires VMX to
+ * pass down the VA of the mapped file.  Before host support was added
+ * there was nothing of the sort.  So, when the driver sees the ioctl
+ * with a parameter that is the sizeof
+ * VMCIqueue_pairPageFileInfo_NoHostQP then it can infer that the version
+ * of VMX running can't attach to host end points because it doesn't
+ * provide the VA of the mapped files.
+ *
+ * The Linux driver doesn't get an indication of the size of the
+ * structure passed down from user space.  So, to fix a long standing
+ * but unfiled bug, the _pad field has been renamed to version.
+ * Existing versions of VMX always initialize the PageFileInfo
+ * structure so that _pad, er, version is set to 0.
+ *
+ * A version value of 1 indicates that the size of the structure has
+ * been increased to include two UVA's: produce_uva and consume_uva.
+ * These UVA's are of the mmap()'d queue contents backing files.
+ *
+ * In addition, if when VMX is sending down the
+ * VMCIqueue_pairPageFileInfo structure it gets an error then it will
+ * try again with the _NoHostQP version of the file to see if an older
+ * VMCI kernel module is running.
+ */
+
+/* VMCIqueue_pairPageFileInfo */
+struct vmci_qp_page_file_info {
+	struct vmci_handle handle;
+	u64 produce_page_file;	  /* User VA. */
+	u64 consume_page_file;	  /* User VA. */
+	u64 produce_page_file_size;  /* Size of the file name array. */
+	u64 consume_page_file_size;  /* Size of the file name array. */
+	s32 result;
+	u32 version;	/* Was _pad. */
+	u64 produce_va;	/* User VA of the mapped file. */
+	u64 consume_va;	/* User VA of the mapped file. */
+};
+
+/* vmci queuepair detach info */
+struct vmci_qp_dtch_info {
+	struct vmci_handle handle;
+	s32 result;
+	u32 _pad;
+};
+
+/*
+ * struct vmci_qp_page_store describes how the memory of a given queue pair
+ * is backed. When the queue pair is between the host and a guest, the
+ * page store consists of references to the guest pages. On vmkernel,
+ * this is a list of PPNs, and on hosted, it is a user VA where the
+ * queue pair is mapped into the VMX address space.
+ */
+struct vmci_qp_page_store {
+	/* Reference to pages backing the queue pair. */
+	u64 pages;
+	/* Length of pageList/virtual address range (in pages). */
+	u32 len;
+};
+
+/*
+ * This data type contains the information about a queue.
+ * There are two queues (hence, queue pairs) per transaction model between a
+ * pair of end points, A & B.  One queue is used by end point A to transmit
+ * commands and responses to B.  The other queue is used by B to transmit
+ * commands and responses.
+ *
+ * struct vmci_queue_kern_if is a per-OS defined Queue structure.  It contains
+ * either a direct pointer to the linear address of the buffer contents or a
+ * pointer to structures which help the OS locate those data pages.  See
+ * vmciKernelIf.c for each platform for its definition.
+ */
+struct vmci_queue {
+	struct vmci_queue_header *q_header;
+	struct vmci_queue_header *saved_header;
+	struct vmci_queue_kern_if *kernel_if;
+};
+
+/*
+ * Utility function that checks whether the fields of the page
+ * store contain valid values.
+ * Result:
+ * true if the page store is well-formed, false otherwise.
+ */
+static inline bool
+VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
+{
+	return page_store->len >= 2;
+}
+
+/*
+ * Helper function to check if the non-blocking flag
+ * is set for a given queue pair.
+ */
+static inline bool vmci_can_block(u32 flags)
+{
+	return !(flags & VMCI_QPFLAG_NONBLOCK);
+}
+
+/*
+ * Helper function to check if the queue pair is pinned
+ * into memory.
+ */
+static inline bool vmci_qp_pinned(u32 flags)
+{
+	return flags & VMCI_QPFLAG_PINNED;
+}
+
+void vmci_qp_broker_exit(void);
+int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
+			 u32 flags, u32 priv_flags,
+			 u64 produce_size, u64 consume_size,
+			 struct vmci_qp_page_store *page_store,
+			 struct vmci_ctx *context);
+int vmci_qp_broker_set_page_store(struct vmci_handle handle,
+				  u64 produce_uva, u64 consume_uva,
+				  struct vmci_ctx *context);
+int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context);
+
+void vmci_qp_guest_endpoints_exit(void);
+
+int vmci_qp_alloc(struct vmci_handle *handle,
+		  struct vmci_queue **produce_q, u64 produce_size,
+		  struct vmci_queue **consume_q, u64 consume_size,
+		  u32 peer, u32 flags, u32 priv_flags,
+		  bool guest_endpoint, vmci_event_release_cb wakeup_cb,
+		  void *client_data);
+int vmci_qp_broker_map(struct vmci_handle handle,
+		       struct vmci_ctx *context, u64 guest_mem);
+int vmci_qp_broker_unmap(struct vmci_handle handle,
+			 struct vmci_ctx *context, u32 gid);
+
+#endif /* _VMCI_QUEUE_PAIR_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
new file mode 100644
index 0000000..a196f84
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -0,0 +1,229 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/rculist.h>
+
+#include "vmci_resource.h"
+#include "vmci_driver.h"
+
+
+#define VMCI_RESOURCE_HASH_BITS         7
+#define VMCI_RESOURCE_HASH_BUCKETS      (1 << VMCI_RESOURCE_HASH_BITS)
+
+struct vmci_hash_table {
+	spinlock_t lock;
+	struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
+};
+
+static struct vmci_hash_table vmci_resource_table = {
+	.lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
+};
+
+static unsigned int vmci_resource_hash(struct vmci_handle handle)
+{
+	return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
+}
+
+/*
+ * Gets a resource (if one exists) matching given handle from the hash table.
+ */
+static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
+						  enum vmci_resource_type type)
+{
+	struct vmci_resource *r, *resource = NULL;
+	struct hlist_node *node;
+	unsigned int idx = vmci_resource_hash(handle);
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(r, node,
+				 &vmci_resource_table.entries[idx], node) {
+		u32 cid = r->handle.context;
+		u32 rid = r->handle.resource;
+
+		if (r->type == type &&
+		    rid == handle.resource &&
+		    (cid == handle.context || cid == VMCI_INVALID_ID)) {
+			resource = r;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return resource;
+}
+
+/*
+ * Find an unused resource ID and return it. The first
+ * VMCI_RESERVED_RESOURCE_ID_MAX IDs are reserved, so we start from
+ * that value + 1.
+ * Returns VMCI resource id on success, VMCI_INVALID_ID on failure.
+ */
+static u32 vmci_resource_find_id(u32 context_id,
+				 enum vmci_resource_type resource_type)
+{
+	static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+	u32 old_rid = resource_id;
+	u32 current_rid;
+
+	/*
+	 * Generate a unique resource ID.  Keep on trying until we wrap around
+	 * in the RID space.
+	 */
+	do {
+		struct vmci_handle handle;
+
+		current_rid = resource_id;
+		resource_id++;
+		if (unlikely(resource_id == VMCI_INVALID_ID)) {
+			/* Skip the reserved rids. */
+			resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
+		}
+
+		handle = vmci_make_handle(context_id, current_rid);
+		if (!vmci_resource_lookup(handle, resource_type))
+			return current_rid;
+	} while (resource_id != old_rid);
+
+	return VMCI_INVALID_ID;
+}
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+		      enum vmci_resource_type resource_type,
+		      struct vmci_handle handle)
+
+{
+	unsigned int idx;
+	int result;
+
+	spin_lock(&vmci_resource_table.lock);
+
+	if (handle.resource == VMCI_INVALID_ID) {
+		handle.resource = vmci_resource_find_id(handle.context,
+			resource_type);
+		if (handle.resource == VMCI_INVALID_ID) {
+			result = VMCI_ERROR_NO_HANDLE;
+			goto out;
+		}
+	} else if (vmci_resource_lookup(handle, resource_type)) {
+		result = VMCI_ERROR_ALREADY_EXISTS;
+		goto out;
+	}
+
+	resource->handle = handle;
+	resource->type = resource_type;
+	INIT_HLIST_NODE(&resource->node);
+	kref_init(&resource->kref);
+	init_completion(&resource->done);
+
+	idx = vmci_resource_hash(resource->handle);
+	hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);
+
+	result = VMCI_SUCCESS;
+
+out:
+	spin_unlock(&vmci_resource_table.lock);
+	return result;
+}
+
+void vmci_resource_remove(struct vmci_resource *resource)
+{
+	struct vmci_handle handle = resource->handle;
+	unsigned int idx = vmci_resource_hash(handle);
+	struct vmci_resource *r;
+	struct hlist_node *node;
+
+	/* Remove resource from hash table. */
+	spin_lock(&vmci_resource_table.lock);
+
+	hlist_for_each_entry(r, node, &vmci_resource_table.entries[idx], node) {
+		if (vmci_handle_is_equal(r->handle, resource->handle)) {
+			hlist_del_init_rcu(&r->node);
+			break;
+		}
+	}
+
+	spin_unlock(&vmci_resource_table.lock);
+	synchronize_rcu();
+
+	vmci_resource_put(resource);
+	wait_for_completion(&resource->done);
+}
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+			enum vmci_resource_type resource_type)
+{
+	struct vmci_resource *r, *resource = NULL;
+
+	rcu_read_lock();
+
+	r = vmci_resource_lookup(resource_handle, resource_type);
+	if (r &&
+	    (resource_type == r->type ||
+	     resource_type == VMCI_RESOURCE_TYPE_ANY)) {
+		resource = vmci_resource_get(r);
+	}
+
+	rcu_read_unlock();
+
+	return resource;
+}
+
+/*
+ * Get a reference to given resource.
+ */
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
+{
+	kref_get(&resource->kref);
+
+	return resource;
+}
+
+static void vmci_release_resource(struct kref *kref)
+{
+	struct vmci_resource *resource =
+		container_of(kref, struct vmci_resource, kref);
+
+	/* Verify the resource has been unlinked from hash table */
+	WARN_ON(!hlist_unhashed(&resource->node));
+
+	/* Signal that container of this resource can now be destroyed */
+	complete(&resource->done);
+}
+
+/*
+ * The resource's release function is called when the last reference
+ * is dropped.  At that point nobody else can increment the count
+ * again (the resource is gone from the hash table), so there is no
+ * need for locking here.
+ */
+int vmci_resource_put(struct vmci_resource *resource)
+{
+	/*
+	 * We propagate the information back to caller in case it wants to know
+	 * whether entry was freed.
+	 */
+	return kref_put(&resource->kref, vmci_release_resource) ?
+		VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
+}
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
+{
+	return resource->handle;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_resource.h b/drivers/misc/vmw_vmci/vmci_resource.h
new file mode 100644
index 0000000..9190cd2
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_resource.h
@@ -0,0 +1,59 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_RESOURCE_H_
+#define _VMCI_RESOURCE_H_
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/types.h>
+
+#include "vmci_context.h"
+
+
+enum vmci_resource_type {
+	VMCI_RESOURCE_TYPE_ANY,
+	VMCI_RESOURCE_TYPE_API,
+	VMCI_RESOURCE_TYPE_GROUP,
+	VMCI_RESOURCE_TYPE_DATAGRAM,
+	VMCI_RESOURCE_TYPE_DOORBELL,
+	VMCI_RESOURCE_TYPE_QPAIR_GUEST,
+	VMCI_RESOURCE_TYPE_QPAIR_HOST
+};
+
+struct vmci_resource {
+	struct vmci_handle handle;
+	enum vmci_resource_type type;
+	struct hlist_node node;
+	struct kref kref;
+	struct completion done;
+};
+
+
+int vmci_resource_add(struct vmci_resource *resource,
+		      enum vmci_resource_type resource_type,
+		      struct vmci_handle handle);
+
+void vmci_resource_remove(struct vmci_resource *resource);
+
+struct vmci_resource *
+vmci_resource_by_handle(struct vmci_handle resource_handle,
+			enum vmci_resource_type resource_type);
+
+struct vmci_resource *vmci_resource_get(struct vmci_resource *resource);
+int vmci_resource_put(struct vmci_resource *resource);
+
+struct vmci_handle vmci_resource_handle(struct vmci_resource *resource);
+
+#endif /* _VMCI_RESOURCE_H_ */
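For illustration, the intended lifecycle of these helpers might look like the sketch below; the embedding example_object structure and the choice of the doorbell type are hypothetical, not taken from this patch:

/* Illustrative lifecycle of a vmci_resource using the helpers above. */
#include <linux/slab.h>
#include <linux/vmw_vmci_defs.h>
#include "vmci_resource.h"

struct example_object {
	struct vmci_resource resource;  /* must outlive the registration */
	/* ... object-specific state ... */
};

static int example_register(struct example_object *obj, u32 context_id)
{
	/* Passing VMCI_INVALID_ID lets vmci_resource_add() pick a free RID. */
	struct vmci_handle h = vmci_make_handle(context_id, VMCI_INVALID_ID);

	return vmci_resource_add(&obj->resource,
				 VMCI_RESOURCE_TYPE_DOORBELL, h);
}

static void example_unregister(struct example_object *obj)
{
	/*
	 * Unlinks the resource from the hash table, drops the initial
	 * reference and waits until all other holders have called
	 * vmci_resource_put() before returning.
	 */
	vmci_resource_remove(&obj->resource);
	kfree(obj);
}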
diff --git a/drivers/misc/vmw_vmci/vmci_route.c b/drivers/misc/vmw_vmci/vmci_route.c
new file mode 100644
index 0000000..9109065
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.c
@@ -0,0 +1,226 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#include <linux/vmw_vmci_defs.h>
+#include <linux/vmw_vmci_api.h>
+
+#include "vmci_context.h"
+#include "vmci_driver.h"
+#include "vmci_route.h"
+
+/*
+ * Make a routing decision for the given source and destination handles.
+ * This will try to determine the route using the handles and the available
+ * devices.  Will set the source context if it is invalid.
+ */
+int vmci_route(struct vmci_handle *src,
+	       const struct vmci_handle *dst,
+	       bool from_guest,
+	       enum vmci_route *route)
+{
+	bool has_host_device = vmci_host_code_active();
+	bool has_guest_device = vmci_guest_code_active();
+
+	*route = VMCI_ROUTE_NONE;
+
+	/*
+	 * "from_guest" is only ever set to true by
+	 * IOCTL_VMCI_DATAGRAM_SEND (or by the vmkernel equivalent),
+	 * which comes from the VMX, so we know it is coming from a
+	 * guest.
+	 *
+	 * To avoid inconsistencies, test these once.  We will test
+	 * them again when we do the actual send to ensure that we do
+	 * not touch a non-existent device.
+	 */
+
+	/* Must have a valid destination context. */
+	if (VMCI_INVALID_ID == dst->context)
+		return VMCI_ERROR_INVALID_ARGS;
+
+	/* Anywhere to hypervisor. */
+	if (VMCI_HYPERVISOR_CONTEXT_ID == dst->context) {
+
+		/*
+		 * If this message already came from a guest then we
+		 * cannot send it to the hypervisor.  It must come
+		 * from a local client.
+		 */
+		if (from_guest)
+			return VMCI_ERROR_DST_UNREACHABLE;
+
+		/*
+		 * We must be acting as a guest in order to send to
+		 * the hypervisor.
+		 */
+		if (!has_guest_device)
+			return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+		/* And we cannot send if the source is the host context. */
+		if (VMCI_HOST_CONTEXT_ID == src->context)
+			return VMCI_ERROR_INVALID_ARGS;
+
+		/*
+		 * If the client passed the ANON source handle then
+		 * respect it (both context and resource are invalid).
+		 * However, if they passed only an invalid context,
+		 * then they probably mean ANY, in which case we
+		 * should set the real context here before passing it
+		 * down.
+		 */
+		if (VMCI_INVALID_ID == src->context &&
+		    VMCI_INVALID_ID != src->resource)
+			src->context = vmci_get_context_id();
+
+		/* Send from local client down to the hypervisor. */
+		*route = VMCI_ROUTE_AS_GUEST;
+		return VMCI_SUCCESS;
+	}
+
+	/* Anywhere to local client on host. */
+	if (VMCI_HOST_CONTEXT_ID == dst->context) {
+		/*
+		 * If it is not from a guest but we are acting as a
+		 * guest, then we need to send it down to the host.
+		 * Note that if we are also acting as a host then this
+		 * will prevent us from sending from local client to
+		 * local client, but we accept that restriction as a
+		 * way to remove any ambiguity from the host context.
+		 */
+		if (src->context == VMCI_HYPERVISOR_CONTEXT_ID) {
+			/*
+			 * If the hypervisor is the source, this is
+			 * host local communication. The hypervisor
+			 * may send vmci event datagrams to the host
+			 * itself, but it will never send datagrams to
+			 * an "outer host" through the guest device.
+			 */
+
+			if (has_host_device) {
+				*route = VMCI_ROUTE_AS_HOST;
+				return VMCI_SUCCESS;
+			} else {
+				return VMCI_ERROR_DEVICE_NOT_FOUND;
+			}
+		}
+
+		if (!from_guest && has_guest_device) {
+			/* If no source context then use the current. */
+			if (VMCI_INVALID_ID == src->context)
+				src->context = vmci_get_context_id();
+
+			/* Send it from local client down to the host. */
+			*route = VMCI_ROUTE_AS_GUEST;
+			return VMCI_SUCCESS;
+		}
+
+		/*
+		 * Otherwise we already received it from a guest and
+		 * it is destined for a local client on this host, or
+		 * it is from another local client on this host.  We
+		 * must be acting as a host to service it.
+		 */
+		if (!has_host_device)
+			return VMCI_ERROR_DEVICE_NOT_FOUND;
+
+		if (VMCI_INVALID_ID == src->context) {
+			/*
+			 * If it came from a guest then it must have a
+			 * valid context.  Otherwise we can use the
+			 * host context.
+			 */
+			if (from_guest)
+				return VMCI_ERROR_INVALID_ARGS;
+
+			src->context = VMCI_HOST_CONTEXT_ID;
+		}
+
+		/* Route to local client. */
+		*route = VMCI_ROUTE_AS_HOST;
+		return VMCI_SUCCESS;
+	}
+
+	/*
+	 * If we are acting as a host then this might be destined for
+	 * a guest.
+	 */
+	if (has_host_device) {
+		/* It will have a context if it is meant for a guest. */
+		if (vmci_ctx_exists(dst->context)) {
+			if (VMCI_INVALID_ID == src->context) {
+				/*
+				 * If it came from a guest then it
+				 * must have a valid context.
+				 * Otherwise we can use the host
+				 * context.
+				 */
+
+				if (from_guest)
+					return VMCI_ERROR_INVALID_ARGS;
+
+				src->context = VMCI_HOST_CONTEXT_ID;
+			} else if (VMCI_CONTEXT_IS_VM(src->context) &&
+				   src->context != dst->context) {
+				/*
+				 * VM to VM communication is not
+				 * allowed. Since we catch all
+				 * communication destined for the host
+				 * above, this must be destined for a
+				 * VM since there is a valid context.
+				 */
+
+				return VMCI_ERROR_DST_UNREACHABLE;
+			}
+
+			/* Pass it up to the guest. */
+			*route = VMCI_ROUTE_AS_HOST;
+			return VMCI_SUCCESS;
+		} else if (!has_guest_device) {
+			/*
+			 * The host is attempting to reach a CID
+			 * without an active context, and we can't
+			 * send it down, since we have no guest
+			 * device.
+			 */
+
+			return VMCI_ERROR_DST_UNREACHABLE;
+		}
+	}
+
+	/*
+	 * We must be a guest trying to send to another guest, which means
+	 * we need to send it down to the host. We do not filter out VM to
+	 * VM communication here, since we want to be able to use the guest
+	 * driver on older versions that do support VM to VM communication.
+	 */
+	if (!has_guest_device) {
+		/*
+		 * Ending up here means we have neither guest nor host
+		 * device.
+		 */
+		return VMCI_ERROR_DEVICE_NOT_FOUND;
+	}
+
+	/* If no source context then use the current context. */
+	if (VMCI_INVALID_ID == src->context)
+		src->context = vmci_get_context_id();
+
+	/*
+	 * Send it from local client down to the host, which will
+	 * route it to the other guest for us.
+	 */
+	*route = VMCI_ROUTE_AS_GUEST;
+	return VMCI_SUCCESS;
+}
diff --git a/drivers/misc/vmw_vmci/vmci_route.h b/drivers/misc/vmw_vmci/vmci_route.h
new file mode 100644
index 0000000..3b30e82
--- /dev/null
+++ b/drivers/misc/vmw_vmci/vmci_route.h
@@ -0,0 +1,30 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMCI_ROUTE_H_
+#define _VMCI_ROUTE_H_
+
+#include <linux/vmw_vmci_defs.h>
+
+enum vmci_route {
+	VMCI_ROUTE_NONE,
+	VMCI_ROUTE_AS_HOST,
+	VMCI_ROUTE_AS_GUEST,
+};
+
+int vmci_route(struct vmci_handle *src, const struct vmci_handle *dst,
+	       bool from_guest, enum vmci_route *route);
+
+#endif /* _VMCI_ROUTE_H_ */
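A hedged sketch of how a caller might consult the router before sending; the peer context and resource IDs are placeholders and the function is illustrative only:

/* Illustrative only: ask which personality would carry a datagram. */
#include <linux/printk.h>
#include <linux/vmw_vmci_defs.h>
#include "vmci_route.h"

static int example_pick_route(u32 peer_cid, u32 peer_rid)
{
	struct vmci_handle src = VMCI_INVALID_HANDLE;
	struct vmci_handle dst = vmci_make_handle(peer_cid, peer_rid);
	enum vmci_route route;
	int rc;

	/* from_guest is false: this request originates inside the kernel. */
	rc = vmci_route(&src, &dst, false, &route);
	if (rc < VMCI_SUCCESS)
		return rc;

	if (route == VMCI_ROUTE_AS_GUEST)
		pr_info("would send through the guest device to the host\n");
	else
		pr_info("would be handled by the host personality\n");

	return VMCI_SUCCESS;
}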
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index cc8a8fa..3be8b94 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -457,7 +457,7 @@
 
 config MMC_CB710
 	tristate "ENE CB710 MMC/SD Interface support"
-	depends on PCI
+	depends on PCI && GENERIC_HARDIRQS
 	select CB710_CORE
 	help
 	  This option enables support for MMC/SD part of ENE CB710/720 Flash
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 2334190..56c2d75 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -188,6 +188,10 @@
 config NET_POLL_CONTROLLER
 	def_bool NETPOLL
 
+config NTB_NETDEV
+	tristate "Virtual Ethernet over NTB"
+	depends on NTB
+
 config RIONET
 	tristate "RapidIO Ethernet over messaging driver support"
 	depends on RAPIDIO
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 335db78..ef3d090 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -71,3 +71,4 @@
 obj-$(CONFIG_USB_CDC_PHONET)   += usb/
 
 obj-$(CONFIG_HYPERV_NET) += hyperv/
+obj-$(CONFIG_NTB_NETDEV) += ntb_netdev.o
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d5202a4..5f85205 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -498,8 +498,7 @@
 
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Network guid */
-	{ VMBUS_DEVICE(0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
-		       0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E) },
+	{ HV_NIC_GUID, },
 	{ },
 };
 
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
new file mode 100644
index 0000000..ed947dd
--- /dev/null
+++ b/drivers/net/ntb_netdev.c
@@ -0,0 +1,408 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Network Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ntb.h>
+
+#define NTB_NETDEV_VER	"0.7"
+
+MODULE_DESCRIPTION(KBUILD_MODNAME);
+MODULE_VERSION(NTB_NETDEV_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+struct ntb_netdev {
+	struct list_head list;
+	struct pci_dev *pdev;
+	struct net_device *ndev;
+	struct ntb_transport_qp *qp;
+};
+
+#define	NTB_TX_TIMEOUT_MS	1000
+#define	NTB_RXQ_SIZE		100
+
+static LIST_HEAD(dev_list);
+
+static void ntb_netdev_event_handler(void *data, int status)
+{
+	struct net_device *ndev = data;
+	struct ntb_netdev *dev = netdev_priv(ndev);
+
+	netdev_dbg(ndev, "Event %x, Link %x\n", status,
+		   ntb_transport_link_query(dev->qp));
+
+	/* Currently, only link status event is supported */
+	if (status)
+		netif_carrier_on(ndev);
+	else
+		netif_carrier_off(ndev);
+}
+
+static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
+				  void *data, int len)
+{
+	struct net_device *ndev = qp_data;
+	struct sk_buff *skb;
+	int rc;
+
+	skb = data;
+	if (!skb)
+		return;
+
+	netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
+
+	skb_put(skb, len);
+	skb->protocol = eth_type_trans(skb, ndev);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	if (netif_rx(skb) == NET_RX_DROP) {
+		ndev->stats.rx_errors++;
+		ndev->stats.rx_dropped++;
+	} else {
+		ndev->stats.rx_packets++;
+		ndev->stats.rx_bytes += len;
+	}
+
+	skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+	if (!skb) {
+		ndev->stats.rx_errors++;
+		ndev->stats.rx_frame_errors++;
+		return;
+	}
+
+	rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
+	if (rc) {
+		dev_kfree_skb(skb);
+		ndev->stats.rx_errors++;
+		ndev->stats.rx_fifo_errors++;
+	}
+}
+
+static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
+				  void *data, int len)
+{
+	struct net_device *ndev = qp_data;
+	struct sk_buff *skb;
+
+	skb = data;
+	if (!skb || !ndev)
+		return;
+
+	if (len > 0) {
+		ndev->stats.tx_packets++;
+		ndev->stats.tx_bytes += skb->len;
+	} else {
+		ndev->stats.tx_errors++;
+		ndev->stats.tx_aborted_errors++;
+	}
+
+	dev_kfree_skb(skb);
+}
+
+static netdev_tx_t ntb_netdev_start_xmit(struct sk_buff *skb,
+					 struct net_device *ndev)
+{
+	struct ntb_netdev *dev = netdev_priv(ndev);
+	int rc;
+
+	netdev_dbg(ndev, "%s: skb len %d\n", __func__, skb->len);
+
+	rc = ntb_transport_tx_enqueue(dev->qp, skb, skb->data, skb->len);
+	if (rc)
+		goto err;
+
+	return NETDEV_TX_OK;
+
+err:
+	ndev->stats.tx_dropped++;
+	ndev->stats.tx_errors++;
+	return NETDEV_TX_BUSY;
+}
+
+static int ntb_netdev_open(struct net_device *ndev)
+{
+	struct ntb_netdev *dev = netdev_priv(ndev);
+	struct sk_buff *skb;
+	int rc, i, len;
+
+	/* Add some empty rx bufs */
+	for (i = 0; i < NTB_RXQ_SIZE; i++) {
+		skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN);
+		if (!skb) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+					      ndev->mtu + ETH_HLEN);
+		if (rc == -EINVAL)
+			goto err;
+	}
+
+	netif_carrier_off(ndev);
+	ntb_transport_link_up(dev->qp);
+
+	return 0;
+
+err:
+	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+		dev_kfree_skb(skb);
+	return rc;
+}
+
+static int ntb_netdev_close(struct net_device *ndev)
+{
+	struct ntb_netdev *dev = netdev_priv(ndev);
+	struct sk_buff *skb;
+	int len;
+
+	ntb_transport_link_down(dev->qp);
+
+	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+		dev_kfree_skb(skb);
+
+	return 0;
+}
+
+static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+	struct ntb_netdev *dev = netdev_priv(ndev);
+	struct sk_buff *skb;
+	int len, rc;
+
+	if (new_mtu > ntb_transport_max_size(dev->qp) - ETH_HLEN)
+		return -EINVAL;
+
+	if (!netif_running(ndev)) {
+		ndev->mtu = new_mtu;
+		return 0;
+	}
+
+	/* Bring down the link and dispose of posted rx entries */
+	ntb_transport_link_down(dev->qp);
+
+	if (ndev->mtu < new_mtu) {
+		int i;
+
+		for (i = 0; (skb = ntb_transport_rx_remove(dev->qp, &len)); i++)
+			dev_kfree_skb(skb);
+
+		for (; i; i--) {
+			skb = netdev_alloc_skb(ndev, new_mtu + ETH_HLEN);
+			if (!skb) {
+				rc = -ENOMEM;
+				goto err;
+			}
+
+			rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
+						      new_mtu + ETH_HLEN);
+			if (rc) {
+				dev_kfree_skb(skb);
+				goto err;
+			}
+		}
+	}
+
+	ndev->mtu = new_mtu;
+
+	ntb_transport_link_up(dev->qp);
+
+	return 0;
+
+err:
+	ntb_transport_link_down(dev->qp);
+
+	while ((skb = ntb_transport_rx_remove(dev->qp, &len)))
+		dev_kfree_skb(skb);
+
+	netdev_err(ndev, "Error changing MTU, device inoperable\n");
+	return rc;
+}
+
+static const struct net_device_ops ntb_netdev_ops = {
+	.ndo_open = ntb_netdev_open,
+	.ndo_stop = ntb_netdev_close,
+	.ndo_start_xmit = ntb_netdev_start_xmit,
+	.ndo_change_mtu = ntb_netdev_change_mtu,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+static void ntb_get_drvinfo(struct net_device *ndev,
+			    struct ethtool_drvinfo *info)
+{
+	struct ntb_netdev *dev = netdev_priv(ndev);
+
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->version, NTB_NETDEV_VER, sizeof(info->version));
+	strlcpy(info->bus_info, pci_name(dev->pdev), sizeof(info->bus_info));
+}
+
+static int ntb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	cmd->supported = SUPPORTED_Backplane;
+	cmd->advertising = ADVERTISED_Backplane;
+	cmd->speed = SPEED_UNKNOWN;
+	ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+	cmd->duplex = DUPLEX_FULL;
+	cmd->port = PORT_OTHER;
+	cmd->phy_address = 0;
+	cmd->transceiver = XCVR_DUMMY1;
+	cmd->autoneg = AUTONEG_ENABLE;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+
+	return 0;
+}
+
+static const struct ethtool_ops ntb_ethtool_ops = {
+	.get_drvinfo = ntb_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_settings = ntb_get_settings,
+};
+
+static const struct ntb_queue_handlers ntb_netdev_handlers = {
+	.tx_handler = ntb_netdev_tx_handler,
+	.rx_handler = ntb_netdev_rx_handler,
+	.event_handler = ntb_netdev_event_handler,
+};
+
+static int ntb_netdev_probe(struct pci_dev *pdev)
+{
+	struct net_device *ndev;
+	struct ntb_netdev *dev;
+	int rc;
+
+	ndev = alloc_etherdev(sizeof(struct ntb_netdev));
+	if (!ndev)
+		return -ENOMEM;
+
+	dev = netdev_priv(ndev);
+	dev->ndev = ndev;
+	dev->pdev = pdev;
+	BUG_ON(!dev->pdev);
+	ndev->features = NETIF_F_HIGHDMA;
+
+	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+
+	ndev->hw_features = ndev->features;
+	ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
+
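+	/* There is no hardware MAC for the NTB link, so generate a random one */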
+	random_ether_addr(ndev->perm_addr);
+	memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+
+	ndev->netdev_ops = &ntb_netdev_ops;
+	SET_ETHTOOL_OPS(ndev, &ntb_ethtool_ops);
+
+	dev->qp = ntb_transport_create_queue(ndev, pdev, &ntb_netdev_handlers);
+	if (!dev->qp) {
+		rc = -EIO;
+		goto err;
+	}
+
+	ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN;
+
+	rc = register_netdev(ndev);
+	if (rc)
+		goto err1;
+
+	list_add(&dev->list, &dev_list);
+	dev_info(&pdev->dev, "%s created\n", ndev->name);
+	return 0;
+
+err1:
+	ntb_transport_free_queue(dev->qp);
+err:
+	free_netdev(ndev);
+	return rc;
+}
+
+static void ntb_netdev_remove(struct pci_dev *pdev)
+{
+	struct net_device *ndev;
+	struct ntb_netdev *dev;
+
+	list_for_each_entry(dev, &dev_list, list) {
+		if (dev->pdev == pdev)
+			break;
+	}
+	if (dev == NULL)
+		return;
+
+	ndev = dev->ndev;
+
+	unregister_netdev(ndev);
+	ntb_transport_free_queue(dev->qp);
+	free_netdev(ndev);
+}
+
+static struct ntb_client ntb_netdev_client = {
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.probe = ntb_netdev_probe,
+	.remove = ntb_netdev_remove,
+};
+
+static int __init ntb_netdev_init_module(void)
+{
+	int rc;
+
+	rc = ntb_register_client_dev(KBUILD_MODNAME);
+	if (rc)
+		return rc;
+	return ntb_register_client(&ntb_netdev_client);
+}
+module_init(ntb_netdev_init_module);
+
+static void __exit ntb_netdev_exit_module(void)
+{
+	ntb_unregister_client(&ntb_netdev_client);
+	ntb_unregister_client_dev(KBUILD_MODNAME);
+}
+module_exit(ntb_netdev_exit_module);
diff --git a/drivers/ntb/Kconfig b/drivers/ntb/Kconfig
new file mode 100644
index 0000000..37ee649
--- /dev/null
+++ b/drivers/ntb/Kconfig
@@ -0,0 +1,13 @@
+config NTB
+       tristate "Intel Non-Transparent Bridge support"
+       depends on PCI
+       depends on X86_64
+       help
+        The PCI-E Non-transparent bridge hardware is a point-to-point PCI-E bus
+        connecting 2 systems.  When configured, writes to the device's PCI
+        mapped memory will be mirrored to a buffer on the remote system.  The
+        ntb Linux driver uses this point-to-point communication as a method to
+        transfer data from one system to the other.
+
+        If unsure, say N.
+
diff --git a/drivers/ntb/Makefile b/drivers/ntb/Makefile
new file mode 100644
index 0000000..15cb59f
--- /dev/null
+++ b/drivers/ntb/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NTB) += ntb.o
+
+ntb-objs := ntb_hw.o ntb_transport.o
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
new file mode 100644
index 0000000..f802e7c
--- /dev/null
+++ b/drivers/ntb/ntb_hw.c
@@ -0,0 +1,1141 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "ntb_hw.h"
+#include "ntb_regs.h"
+
+#define NTB_NAME	"Intel(R) PCI-E Non-Transparent Bridge Driver"
+#define NTB_VER		"0.25"
+
+MODULE_DESCRIPTION(NTB_NAME);
+MODULE_VERSION(NTB_VER);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel Corporation");
+
+enum {
+	NTB_CONN_CLASSIC = 0,
+	NTB_CONN_B2B,
+	NTB_CONN_RP,
+};
+
+enum {
+	NTB_DEV_USD = 0,
+	NTB_DEV_DSD,
+};
+
+enum {
+	SNB_HW = 0,
+	BWD_HW,
+};
+
+/* Translate memory window 0,1 to BAR 2,4 */
+#define MW_TO_BAR(mw)	(mw * 2 + 2)
+
+static DEFINE_PCI_DEVICE_TABLE(ntb_pci_tbl) = {
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BWD)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_JSF)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_RP_SNB)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
+	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB)},
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, ntb_pci_tbl);
+
+/**
+ * ntb_register_event_callback() - register event callback
+ * @ndev: pointer to ntb_device instance
+ * @func: callback function to register
+ *
+ * This function registers a callback for any HW driver events such as link
+ * up/down, power management notices, etc.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_event_callback(struct ntb_device *ndev,
+			    void (*func)(void *handle, enum ntb_hw_event event))
+{
+	if (ndev->event_cb)
+		return -EINVAL;
+
+	ndev->event_cb = func;
+
+	return 0;
+}
+
+/**
+ * ntb_unregister_event_callback() - unregisters the event callback
+ * @ndev: pointer to ntb_device instance
+ *
+ * This function unregisters the existing callback from transport
+ */
+void ntb_unregister_event_callback(struct ntb_device *ndev)
+{
+	ndev->event_cb = NULL;
+}
+
+/**
+ * ntb_register_db_callback() - register a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index to register callback, zero based
+ * @data: user data to be passed back to the callback
+ * @func: callback function to register
+ *
+ * This function registers a callback function for the doorbell interrupt
+ * on the primary side. The function will also unmask the doorbell to
+ * allow the interrupt to be delivered.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+			     void *data, void (*func)(void *data, int db_num))
+{
+	unsigned long mask;
+
+	if (idx >= ndev->max_cbs || ndev->db_cb[idx].callback) {
+		dev_warn(&ndev->pdev->dev, "Invalid Index.\n");
+		return -EINVAL;
+	}
+
+	ndev->db_cb[idx].callback = func;
+	ndev->db_cb[idx].data = data;
+
+	/* unmask interrupt */
+	mask = readw(ndev->reg_ofs.pdb_mask);
+	clear_bit(idx * ndev->bits_per_vector, &mask);
+	writew(mask, ndev->reg_ofs.pdb_mask);
+
+	return 0;
+}
+
+/**
+ * ntb_unregister_db_callback() - unregister a callback for doorbell interrupt
+ * @ndev: pointer to ntb_device instance
+ * @idx: doorbell index of the callback to unregister, zero based
+ *
+ * This function unregisters a callback function for the doorbell interrupt
+ * on the primary side. The function will also mask the said doorbell.
+ */
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx)
+{
+	unsigned long mask;
+
+	if (idx >= ndev->max_cbs || !ndev->db_cb[idx].callback)
+		return;
+
+	mask = readw(ndev->reg_ofs.pdb_mask);
+	set_bit(idx * ndev->bits_per_vector, &mask);
+	writew(mask, ndev->reg_ofs.pdb_mask);
+
+	ndev->db_cb[idx].callback = NULL;
+}
+
+/**
+ * ntb_find_transport() - find the transport pointer
+ * @pdev: pointer to pci device
+ *
+ * Given the pci device pointer, return the transport pointer that was passed
+ * in when the transport registered with this NTB device.
+ *
+ * RETURNS: pointer to transport.
+ */
+void *ntb_find_transport(struct pci_dev *pdev)
+{
+	struct ntb_device *ndev = pci_get_drvdata(pdev);
+	return ndev->ntb_transport;
+}
+
+/**
+ * ntb_register_transport() - Register NTB transport with NTB HW driver
+ * @transport: transport identifier
+ *
+ * This function allows a transport to reserve the hardware driver for
+ * NTB usage.
+ *
+ * RETURNS: pointer to ntb_device, NULL on error.
+ */
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev, void *transport)
+{
+	struct ntb_device *ndev = pci_get_drvdata(pdev);
+
+	if (ndev->ntb_transport)
+		return NULL;
+
+	ndev->ntb_transport = transport;
+	return ndev;
+}
+
+/**
+ * ntb_unregister_transport() - Unregister the transport with the NTB HW driver
+ * @ndev: ntb_device of the transport to be freed
+ *
+ * This function unregisters the transport from the HW driver and performs any
+ * necessary cleanups.
+ */
+void ntb_unregister_transport(struct ntb_device *ndev)
+{
+	int i;
+
+	if (!ndev->ntb_transport)
+		return;
+
+	for (i = 0; i < ndev->max_cbs; i++)
+		ntb_unregister_db_callback(ndev, i);
+
+	ntb_unregister_event_callback(ndev);
+	ndev->ntb_transport = NULL;
+}
+
+/**
+ * ntb_write_local_spad() - write to the local scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. This writes over the data mirrored to the local scratchpad register
+ * by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+	if (idx >= ndev->limits.max_spads)
+		return -EINVAL;
+
+	dev_dbg(&ndev->pdev->dev, "Writing %x to local scratch pad index %d\n",
+		val, idx);
+	writel(val, ndev->reg_ofs.spad_read + idx * 4);
+
+	return 0;
+}
+
+/**
+ * ntb_read_local_spad() - read from the primary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side.  This allows the local system to read data
+ * written and mirrored to the scratchpad register by the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+	if (idx >= ndev->limits.max_spads)
+		return -EINVAL;
+
+	*val = readl(ndev->reg_ofs.spad_write + idx * 4);
+	dev_dbg(&ndev->pdev->dev,
+		"Reading %x from local scratch pad index %d\n", *val, idx);
+
+	return 0;
+}
+
+/**
+ * ntb_write_remote_spad() - write to the secondary scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. The register resides on the secondary (external) side.  This allows
+ * the local system to write data to be mirrored to the remote system's
+ * scratchpad register.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val)
+{
+	if (idx >= ndev->limits.max_spads)
+		return -EINVAL;
+
+	dev_dbg(&ndev->pdev->dev, "Writing %x to remote scratch pad index %d\n",
+		val, idx);
+	writel(val, ndev->reg_ofs.spad_write + idx * 4);
+
+	return 0;
+}
+
+/**
+ * ntb_read_remote_spad() - read from the remote scratchpad register
+ * @ndev: pointer to ntb_device instance
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the primary (internal) side.  This allows the local system to read the data
+ * it wrote to be mirrored on the remote system.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
+{
+	if (idx >= ndev->limits.max_spads)
+		return -EINVAL;
+
+	*val = readl(ndev->reg_ofs.spad_read + idx * 4);
+	dev_dbg(&ndev->pdev->dev,
+		"Reading %x from remote scratch pad index %d\n", *val, idx);
+
+	return 0;
+}
+
+/**
+ * ntb_get_mw_vbase() - get virtual addr for the NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the base virtual address of the memory window
+ * specified.
+ *
+ * RETURNS: pointer to virtual address, or NULL on error.
+ */
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
+{
+	if (mw > NTB_NUM_MW)
+		return NULL;
+
+	return ndev->mw[mw].vbase;
+}
+
+/**
+ * ntb_get_mw_size() - return size of NTB memory window
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ *
+ * This function provides the physical size of the memory window specified.
+ *
+ * RETURNS: the size of the memory window or zero on error
+ */
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
+{
+	if (mw > NTB_NUM_MW)
+		return 0;
+
+	return ndev->mw[mw].bar_sz;
+}
+
+/**
+ * ntb_set_mw_addr - set the memory window address
+ * @ndev: pointer to ntb_device instance
+ * @mw: memory window number
+ * @addr: base address for data
+ *
+ * This function sets the base physical address of the memory window.  This
+ * memory address is where data from the remote system will be transferred into
+ * or out of depending on how the transport is configured.
+ */
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
+{
+	if (mw > NTB_NUM_MW)
+		return;
+
+	dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
+		MW_TO_BAR(mw));
+
+	ndev->mw[mw].phys_addr = addr;
+
+	switch (MW_TO_BAR(mw)) {
+	case NTB_BAR_23:
+		writeq(addr, ndev->reg_ofs.sbar2_xlat);
+		break;
+	case NTB_BAR_45:
+		writeq(addr, ndev->reg_ofs.sbar4_xlat);
+		break;
+	}
+}
+
+/**
+ * ntb_ring_sdb() - Set the doorbell on the secondary/external side
+ * @ndev: pointer to ntb_device instance
+ * @db: doorbell to ring
+ *
+ * This function allows triggering of a doorbell on the secondary/external
+ * side that will initiate an interrupt on the remote host.
+ */
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int db)
+{
+	dev_dbg(&ndev->pdev->dev, "%s: ringing doorbell %d\n", __func__, db);
+
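+	/* BWD has one doorbell bit per vector; SNB packs several bits per
+	 * vector, so set every bit belonging to the requested doorbell.
+	 */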
+	if (ndev->hw_type == BWD_HW)
+		writeq((u64) 1 << db, ndev->reg_ofs.sdb);
+	else
+		writew(((1 << ndev->bits_per_vector) - 1) <<
+		       (db * ndev->bits_per_vector), ndev->reg_ofs.sdb);
+}
+
+static void ntb_link_event(struct ntb_device *ndev, int link_state)
+{
+	unsigned int event;
+
+	if (ndev->link_status == link_state)
+		return;
+
+	if (link_state == NTB_LINK_UP) {
+		u16 status;
+
+		dev_info(&ndev->pdev->dev, "Link Up\n");
+		ndev->link_status = NTB_LINK_UP;
+		event = NTB_EVENT_HW_LINK_UP;
+
+		if (ndev->hw_type == BWD_HW)
+			status = readw(ndev->reg_ofs.lnk_stat);
+		else {
+			int rc = pci_read_config_word(ndev->pdev,
+						      SNB_LINK_STATUS_OFFSET,
+						      &status);
+			if (rc)
+				return;
+		}
+		dev_info(&ndev->pdev->dev, "Link Width %d, Link Speed %d\n",
+			 (status & NTB_LINK_WIDTH_MASK) >> 4,
+			 (status & NTB_LINK_SPEED_MASK));
+	} else {
+		dev_info(&ndev->pdev->dev, "Link Down\n");
+		ndev->link_status = NTB_LINK_DOWN;
+		event = NTB_EVENT_HW_LINK_DOWN;
+	}
+
+	/* notify the upper layer if we have an event change */
+	if (ndev->event_cb)
+		ndev->event_cb(ndev->ntb_transport, event);
+}
+
+static int ntb_link_status(struct ntb_device *ndev)
+{
+	int link_state;
+
+	if (ndev->hw_type == BWD_HW) {
+		u32 ntb_cntl;
+
+		ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+		if (ntb_cntl & BWD_CNTL_LINK_DOWN)
+			link_state = NTB_LINK_DOWN;
+		else
+			link_state = NTB_LINK_UP;
+	} else {
+		u16 status;
+		int rc;
+
+		rc = pci_read_config_word(ndev->pdev, SNB_LINK_STATUS_OFFSET,
+					  &status);
+		if (rc)
+			return rc;
+
+		if (status & NTB_LINK_STATUS_ACTIVE)
+			link_state = NTB_LINK_UP;
+		else
+			link_state = NTB_LINK_DOWN;
+	}
+
+	ntb_link_event(ndev, link_state);
+
+	return 0;
+}
+
+/* BWD doesn't have link status interrupt, poll on that platform */
+static void bwd_link_poll(struct work_struct *work)
+{
+	struct ntb_device *ndev = container_of(work, struct ntb_device,
+					       hb_timer.work);
+	unsigned long ts = jiffies;
+
+	/* If we haven't gotten an interrupt in a while, check the BWD link
+	 * status bit
+	 */
+	if (ts > ndev->last_ts + NTB_HB_TIMEOUT) {
+		int rc = ntb_link_status(ndev);
+		if (rc)
+			dev_err(&ndev->pdev->dev,
+				"Error determining link status\n");
+	}
+
+	schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+}
+
+static int ntb_xeon_setup(struct ntb_device *ndev)
+{
+	int rc;
+	u8 val;
+
+	ndev->hw_type = SNB_HW;
+
+	rc = pci_read_config_byte(ndev->pdev, NTB_PPD_OFFSET, &val);
+	if (rc)
+		return rc;
+
+	switch (val & SNB_PPD_CONN_TYPE) {
+	case NTB_CONN_B2B:
+		ndev->conn_type = NTB_CONN_B2B;
+		break;
+	case NTB_CONN_CLASSIC:
+	case NTB_CONN_RP:
+	default:
+		dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+		return -EINVAL;
+	}
+
+	if (val & SNB_PPD_DEV_TYPE)
+		ndev->dev_type = NTB_DEV_DSD;
+	else
+		ndev->dev_type = NTB_DEV_USD;
+
+	ndev->reg_ofs.pdb = ndev->reg_base + SNB_PDOORBELL_OFFSET;
+	ndev->reg_ofs.pdb_mask = ndev->reg_base + SNB_PDBMSK_OFFSET;
+	ndev->reg_ofs.sbar2_xlat = ndev->reg_base + SNB_SBAR2XLAT_OFFSET;
+	ndev->reg_ofs.sbar4_xlat = ndev->reg_base + SNB_SBAR4XLAT_OFFSET;
+	ndev->reg_ofs.lnk_cntl = ndev->reg_base + SNB_NTBCNTL_OFFSET;
+	ndev->reg_ofs.lnk_stat = ndev->reg_base + SNB_LINK_STATUS_OFFSET;
+	ndev->reg_ofs.spad_read = ndev->reg_base + SNB_SPAD_OFFSET;
+	ndev->reg_ofs.spci_cmd = ndev->reg_base + SNB_PCICMD_OFFSET;
+
+	if (ndev->conn_type == NTB_CONN_B2B) {
+		ndev->reg_ofs.sdb = ndev->reg_base + SNB_B2B_DOORBELL_OFFSET;
+		ndev->reg_ofs.spad_write = ndev->reg_base + SNB_B2B_SPAD_OFFSET;
+		ndev->limits.max_spads = SNB_MAX_SPADS;
+	} else {
+		ndev->reg_ofs.sdb = ndev->reg_base + SNB_SDOORBELL_OFFSET;
+		ndev->reg_ofs.spad_write = ndev->reg_base + SNB_SPAD_OFFSET;
+		ndev->limits.max_spads = SNB_MAX_COMPAT_SPADS;
+	}
+
+	ndev->limits.max_db_bits = SNB_MAX_DB_BITS;
+	ndev->limits.msix_cnt = SNB_MSIX_CNT;
+	ndev->bits_per_vector = SNB_DB_BITS_PER_VEC;
+
+	return 0;
+}
+
+static int ntb_bwd_setup(struct ntb_device *ndev)
+{
+	int rc;
+	u32 val;
+
+	ndev->hw_type = BWD_HW;
+
+	rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &val);
+	if (rc)
+		return rc;
+
+	switch ((val & BWD_PPD_CONN_TYPE) >> 8) {
+	case NTB_CONN_B2B:
+		ndev->conn_type = NTB_CONN_B2B;
+		break;
+	case NTB_CONN_RP:
+	default:
+		dev_err(&ndev->pdev->dev, "Only B2B supported at this time\n");
+		return -EINVAL;
+	}
+
+	if (val & BWD_PPD_DEV_TYPE)
+		ndev->dev_type = NTB_DEV_DSD;
+	else
+		ndev->dev_type = NTB_DEV_USD;
+
+	/* Initiate PCI-E link training */
+	rc = pci_write_config_dword(ndev->pdev, NTB_PPD_OFFSET,
+				    val | BWD_PPD_INIT_LINK);
+	if (rc)
+		return rc;
+
+	ndev->reg_ofs.pdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+	ndev->reg_ofs.pdb_mask = ndev->reg_base + BWD_PDBMSK_OFFSET;
+	ndev->reg_ofs.sbar2_xlat = ndev->reg_base + BWD_SBAR2XLAT_OFFSET;
+	ndev->reg_ofs.sbar4_xlat = ndev->reg_base + BWD_SBAR4XLAT_OFFSET;
+	ndev->reg_ofs.lnk_cntl = ndev->reg_base + BWD_NTBCNTL_OFFSET;
+	ndev->reg_ofs.lnk_stat = ndev->reg_base + BWD_LINK_STATUS_OFFSET;
+	ndev->reg_ofs.spad_read = ndev->reg_base + BWD_SPAD_OFFSET;
+	ndev->reg_ofs.spci_cmd = ndev->reg_base + BWD_PCICMD_OFFSET;
+
+	if (ndev->conn_type == NTB_CONN_B2B) {
+		ndev->reg_ofs.sdb = ndev->reg_base + BWD_B2B_DOORBELL_OFFSET;
+		ndev->reg_ofs.spad_write = ndev->reg_base + BWD_B2B_SPAD_OFFSET;
+		ndev->limits.max_spads = BWD_MAX_SPADS;
+	} else {
+		ndev->reg_ofs.sdb = ndev->reg_base + BWD_PDOORBELL_OFFSET;
+		ndev->reg_ofs.spad_write = ndev->reg_base + BWD_SPAD_OFFSET;
+		ndev->limits.max_spads = BWD_MAX_COMPAT_SPADS;
+	}
+
+	ndev->limits.max_db_bits = BWD_MAX_DB_BITS;
+	ndev->limits.msix_cnt = BWD_MSIX_CNT;
+	ndev->bits_per_vector = BWD_DB_BITS_PER_VEC;
+
+	/* Since bwd doesn't have a link interrupt, setup a poll timer */
+	INIT_DELAYED_WORK(&ndev->hb_timer, bwd_link_poll);
+	schedule_delayed_work(&ndev->hb_timer, NTB_HB_TIMEOUT);
+
+	return 0;
+}
+
+static int ntb_device_setup(struct ntb_device *ndev)
+{
+	int rc;
+
+	switch (ndev->pdev->device) {
+	case PCI_DEVICE_ID_INTEL_NTB_2ND_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_RP_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_RP_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
+		rc = ntb_xeon_setup(ndev);
+		break;
+	case PCI_DEVICE_ID_INTEL_NTB_B2B_BWD:
+		rc = ntb_bwd_setup(ndev);
+		break;
+	default:
+		rc = -ENODEV;
+	}
+
+	/* Enable Bus Master and Memory Space on the secondary side */
+	writew(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER, ndev->reg_ofs.spci_cmd);
+
+	return rc;
+}
+
+static void ntb_device_free(struct ntb_device *ndev)
+{
+	if (ndev->hw_type == BWD_HW)
+		cancel_delayed_work_sync(&ndev->hb_timer);
+}
+
+static irqreturn_t bwd_callback_msix_irq(int irq, void *data)
+{
+	struct ntb_db_cb *db_cb = data;
+	struct ntb_device *ndev = db_cb->ndev;
+
+	dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+		db_cb->db_num);
+
+	if (db_cb->callback)
+		db_cb->callback(db_cb->data, db_cb->db_num);
+
+	/* No need to check for the specific HB irq, any interrupt means
+	 * we're connected.
+	 */
+	ndev->last_ts = jiffies;
+
+	writeq((u64) 1 << db_cb->db_num, ndev->reg_ofs.pdb);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t xeon_callback_msix_irq(int irq, void *data)
+{
+	struct ntb_db_cb *db_cb = data;
+	struct ntb_device *ndev = db_cb->ndev;
+
+	dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for DB %d\n", irq,
+		db_cb->db_num);
+
+	if (db_cb->callback)
+		db_cb->callback(db_cb->data, db_cb->db_num);
+
+	/* On Sandybridge, there are 16 bits in the interrupt register
+	 * but only 4 vectors.  So, 5 bits are assigned to the first 3
+	 * vectors, with the 4th having a single bit for link
+	 * interrupts.
+	 */
+	writew(((1 << ndev->bits_per_vector) - 1) <<
+	       (db_cb->db_num * ndev->bits_per_vector), ndev->reg_ofs.pdb);
+
+	return IRQ_HANDLED;
+}
+
+/* Since we do not have a HW doorbell in BWD, this is only used in JF/JT */
+static irqreturn_t xeon_event_msix_irq(int irq, void *dev)
+{
+	struct ntb_device *ndev = dev;
+	int rc;
+
+	dev_dbg(&ndev->pdev->dev, "MSI-X irq %d received for Events\n", irq);
+
+	rc = ntb_link_status(ndev);
+	if (rc)
+		dev_err(&ndev->pdev->dev, "Error determining link status\n");
+
+	/* bit 15 is always the link bit */
+	writew(1 << ndev->limits.max_db_bits, ndev->reg_ofs.pdb);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t ntb_interrupt(int irq, void *dev)
+{
+	struct ntb_device *ndev = dev;
+	unsigned int i = 0;
+
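+	/* MSI/INTx fallback path: a single vector services every doorbell bit,
+	 * so walk the pending doorbell register and dispatch each set bit.
+	 */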
+	if (ndev->hw_type == BWD_HW) {
+		u64 pdb = readq(ndev->reg_ofs.pdb);
+
+		dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %Lx\n", irq, pdb);
+
+		while (pdb) {
+			i = __ffs(pdb);
+			pdb &= pdb - 1;
+			bwd_callback_msix_irq(irq, &ndev->db_cb[i]);
+		}
+	} else {
+		u16 pdb = readw(ndev->reg_ofs.pdb);
+
+		dev_dbg(&ndev->pdev->dev, "irq %d - pdb = %x sdb %x\n", irq,
+			pdb, readw(ndev->reg_ofs.sdb));
+
+		if (pdb & SNB_DB_HW_LINK) {
+			xeon_event_msix_irq(irq, dev);
+			pdb &= ~SNB_DB_HW_LINK;
+		}
+
+		while (pdb) {
+			i = __ffs(pdb);
+			pdb &= pdb - 1;
+			xeon_callback_msix_irq(irq, &ndev->db_cb[i]);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int ntb_setup_msix(struct ntb_device *ndev)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	struct msix_entry *msix;
+	int msix_entries;
+	int rc, i, pos;
+	u16 val;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+	if (!pos) {
+		rc = -EIO;
+		goto err;
+	}
+
+	rc = pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &val);
+	if (rc)
+		goto err;
+
+	msix_entries = msix_table_size(val);
+	if (msix_entries > ndev->limits.msix_cnt) {
+		rc = -EINVAL;
+		goto err;
+	}
+
+	ndev->msix_entries = kmalloc(sizeof(struct msix_entry) * msix_entries,
+				     GFP_KERNEL);
+	if (!ndev->msix_entries) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < msix_entries; i++)
+		ndev->msix_entries[i].entry = i;
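+	/* A larger MTU needs bigger rx buffers: drain the posted entries and
+	 * repost the same number of buffers sized for the new MTU.
+	 */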
+
+	rc = pci_enable_msix(pdev, ndev->msix_entries, msix_entries);
+	if (rc < 0)
+		goto err1;
+	if (rc > 0) {
+		/* On SNB, the link interrupt is always tied to the 4th vector.  If
+		 * we can't get all 4, then we can't use MSI-X.
+		 */
+		if (ndev->hw_type != BWD_HW) {
+			rc = -EIO;
+			goto err1;
+		}
+
+		dev_warn(&pdev->dev,
+			 "Only %d MSI-X vectors.  Limiting the number of queues to that number.\n",
+			 rc);
+		msix_entries = rc;
+	}
+
+	for (i = 0; i < msix_entries; i++) {
+		msix = &ndev->msix_entries[i];
+		WARN_ON(!msix->vector);
+
+		/* Use the last MSI-X vector for Link status */
+		if (ndev->hw_type == BWD_HW) {
+			rc = request_irq(msix->vector, bwd_callback_msix_irq, 0,
+					 "ntb-callback-msix", &ndev->db_cb[i]);
+			if (rc)
+				goto err2;
+		} else {
+			if (i == msix_entries - 1) {
+				rc = request_irq(msix->vector,
+						 xeon_event_msix_irq, 0,
+						 "ntb-event-msix", ndev);
+				if (rc)
+					goto err2;
+			} else {
+				rc = request_irq(msix->vector,
+						 xeon_callback_msix_irq, 0,
+						 "ntb-callback-msix",
+						 &ndev->db_cb[i]);
+				if (rc)
+					goto err2;
+			}
+		}
+	}
+
+	ndev->num_msix = msix_entries;
+	if (ndev->hw_type == BWD_HW)
+		ndev->max_cbs = msix_entries;
+	else
+		ndev->max_cbs = msix_entries - 1;
+
+	return 0;
+
+err2:
+	while (--i >= 0) {
+		msix = &ndev->msix_entries[i];
+		if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+			free_irq(msix->vector, ndev);
+		else
+			free_irq(msix->vector, &ndev->db_cb[i]);
+	}
+	pci_disable_msix(pdev);
+err1:
+	kfree(ndev->msix_entries);
+	dev_err(&pdev->dev, "Error allocating MSI-X interrupt\n");
+err:
+	ndev->num_msix = 0;
+	return rc;
+}
+
+static int ntb_setup_msi(struct ntb_device *ndev)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	int rc;
+
+	rc = pci_enable_msi(pdev);
+	if (rc)
+		return rc;
+
+	rc = request_irq(pdev->irq, ntb_interrupt, 0, "ntb-msi", ndev);
+	if (rc) {
+		pci_disable_msi(pdev);
+		dev_err(&pdev->dev, "Error allocating MSI interrupt\n");
+		return rc;
+	}
+
+	return 0;
+}
+
+static int ntb_setup_intx(struct ntb_device *ndev)
+{
+	struct pci_dev *pdev = ndev->pdev;
+	int rc;
+
+	pci_msi_off(pdev);
+
+	/* Verify intx is enabled */
+	pci_intx(pdev, 1);
+
+	rc = request_irq(pdev->irq, ntb_interrupt, IRQF_SHARED, "ntb-intx",
+			 ndev);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int ntb_setup_interrupts(struct ntb_device *ndev)
+{
+	int rc;
+
+	/* On BWD, disable all interrupts.  On SNB, disable all but Link
+	 * Interrupt.  The rest will be unmasked as callbacks are registered.
+	 */
+	if (ndev->hw_type == BWD_HW)
+		writeq(~0, ndev->reg_ofs.pdb_mask);
+	else
+		writew(~(1 << ndev->limits.max_db_bits),
+		       ndev->reg_ofs.pdb_mask);
+
+	rc = ntb_setup_msix(ndev);
+	if (!rc)
+		goto done;
+
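+	/* MSI-X setup failed; fall back to MSI, then legacy INTx, where a
+	 * single vector services all doorbell bits.
+	 */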
+	ndev->bits_per_vector = 1;
+	ndev->max_cbs = ndev->limits.max_db_bits;
+
+	rc = ntb_setup_msi(ndev);
+	if (!rc)
+		goto done;
+
+	rc = ntb_setup_intx(ndev);
+	if (rc) {
+		dev_err(&ndev->pdev->dev, "no usable interrupts\n");
+		return rc;
+	}
+
+done:
+	return 0;
+}
+
+static void ntb_free_interrupts(struct ntb_device *ndev)
+{
+	struct pci_dev *pdev = ndev->pdev;
+
+	/* mask interrupts */
+	if (ndev->hw_type == BWD_HW)
+		writeq(~0, ndev->reg_ofs.pdb_mask);
+	else
+		writew(~0, ndev->reg_ofs.pdb_mask);
+
+	if (ndev->num_msix) {
+		struct msix_entry *msix;
+		u32 i;
+
+		for (i = 0; i < ndev->num_msix; i++) {
+			msix = &ndev->msix_entries[i];
+			if (ndev->hw_type != BWD_HW && i == ndev->num_msix - 1)
+				free_irq(msix->vector, ndev);
+			else
+				free_irq(msix->vector, &ndev->db_cb[i]);
+		}
+		pci_disable_msix(pdev);
+	} else {
+		free_irq(pdev->irq, ndev);
+
+		if (pci_dev_msi_enabled(pdev))
+			pci_disable_msi(pdev);
+	}
+}
+
+static int ntb_create_callbacks(struct ntb_device *ndev)
+{
+	int i;
+
+	/* Chicken-and-egg issue.  We won't know how many callbacks are necessary
+	 * until we see how many MSI-X vectors we get, but these pointers need
+	 * to be passed into the MSI-X register function.  So, we allocate the
+	 * max, knowing that they might not all be used, to work around this.
+	 */
+	ndev->db_cb = kcalloc(ndev->limits.max_db_bits,
+			      sizeof(struct ntb_db_cb),
+			      GFP_KERNEL);
+	if (!ndev->db_cb)
+		return -ENOMEM;
+
+	for (i = 0; i < ndev->limits.max_db_bits; i++) {
+		ndev->db_cb[i].db_num = i;
+		ndev->db_cb[i].ndev = ndev;
+	}
+
+	return 0;
+}
+
+static void ntb_free_callbacks(struct ntb_device *ndev)
+{
+	int i;
+
+	for (i = 0; i < ndev->limits.max_db_bits; i++)
+		ntb_unregister_db_callback(ndev, i);
+
+	kfree(ndev->db_cb);
+}
+
+static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct ntb_device *ndev;
+	int rc, i;
+
+	ndev = kzalloc(sizeof(struct ntb_device), GFP_KERNEL);
+	if (!ndev)
+		return -ENOMEM;
+
+	ndev->pdev = pdev;
+	ndev->link_status = NTB_LINK_DOWN;
+	pci_set_drvdata(pdev, ndev);
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		goto err;
+
+	pci_set_master(ndev->pdev);
+
+	rc = pci_request_selected_regions(pdev, NTB_BAR_MASK, KBUILD_MODNAME);
+	if (rc)
+		goto err1;
+
+	ndev->reg_base = pci_ioremap_bar(pdev, NTB_BAR_MMIO);
+	if (!ndev->reg_base) {
+		dev_warn(&pdev->dev, "Cannot remap BAR 0\n");
+		rc = -EIO;
+		goto err2;
+	}
+
+	for (i = 0; i < NTB_NUM_MW; i++) {
+		ndev->mw[i].bar_sz = pci_resource_len(pdev, MW_TO_BAR(i));
+		ndev->mw[i].vbase =
+		    ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
+			       ndev->mw[i].bar_sz);
+		dev_info(&pdev->dev, "MW %d size %d\n", i,
+			 (u32) pci_resource_len(pdev, MW_TO_BAR(i)));
+		if (!ndev->mw[i].vbase) {
+			dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
+				 MW_TO_BAR(i));
+			rc = -EIO;
+			goto err3;
+		}
+	}
+
+	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc)
+			goto err3;
+
+		dev_warn(&pdev->dev, "Cannot DMA highmem\n");
+	}
+
+	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (rc) {
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (rc)
+			goto err3;
+
+		dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
+	}
+
+	rc = ntb_device_setup(ndev);
+	if (rc)
+		goto err3;
+
+	rc = ntb_create_callbacks(ndev);
+	if (rc)
+		goto err4;
+
+	rc = ntb_setup_interrupts(ndev);
+	if (rc)
+		goto err5;
+
+	/* The scratchpad registers retain their values across rmmod/insmod,
+	 * so clear them now
+	 */
+	for (i = 0; i < ndev->limits.max_spads; i++) {
+		ntb_write_local_spad(ndev, i, 0);
+		ntb_write_remote_spad(ndev, i, 0);
+	}
+
+	rc = ntb_transport_init(pdev);
+	if (rc)
+		goto err6;
+
+	/* Let's bring the NTB link up */
+	writel(NTB_CNTL_BAR23_SNOOP | NTB_CNTL_BAR45_SNOOP,
+	       ndev->reg_ofs.lnk_cntl);
+
+	return 0;
+
+err6:
+	ntb_free_interrupts(ndev);
+err5:
+	ntb_free_callbacks(ndev);
+err4:
+	ntb_device_free(ndev);
+err3:
+	for (i--; i >= 0; i--)
+		iounmap(ndev->mw[i].vbase);
+	iounmap(ndev->reg_base);
+err2:
+	pci_release_selected_regions(pdev, NTB_BAR_MASK);
+err1:
+	pci_disable_device(pdev);
+err:
+	kfree(ndev);
+
+	dev_err(&pdev->dev, "Error loading %s module\n", KBUILD_MODNAME);
+	return rc;
+}
+
+static void ntb_pci_remove(struct pci_dev *pdev)
+{
+	struct ntb_device *ndev = pci_get_drvdata(pdev);
+	int i;
+	u32 ntb_cntl;
+
+	/* Bring NTB link down */
+	ntb_cntl = readl(ndev->reg_ofs.lnk_cntl);
+	ntb_cntl |= NTB_LINK_DISABLE;
+	writel(ntb_cntl, ndev->reg_ofs.lnk_cntl);
+
+	ntb_transport_free(ndev->ntb_transport);
+
+	ntb_free_interrupts(ndev);
+	ntb_free_callbacks(ndev);
+	ntb_device_free(ndev);
+
+	for (i = 0; i < NTB_NUM_MW; i++)
+		iounmap(ndev->mw[i].vbase);
+
+	iounmap(ndev->reg_base);
+	pci_release_selected_regions(pdev, NTB_BAR_MASK);
+	pci_disable_device(pdev);
+	kfree(ndev);
+}
+
+static struct pci_driver ntb_pci_driver = {
+	.name = KBUILD_MODNAME,
+	.id_table = ntb_pci_tbl,
+	.probe = ntb_pci_probe,
+	.remove = ntb_pci_remove,
+};
+module_pci_driver(ntb_pci_driver);
diff --git a/drivers/ntb/ntb_hw.h b/drivers/ntb/ntb_hw.h
new file mode 100644
index 0000000..3a3038c
--- /dev/null
+++ b/drivers/ntb/ntb_hw.h
@@ -0,0 +1,181 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_JSF		0x3725
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_JSF	0x3726
+#define PCI_DEVICE_ID_INTEL_NTB_RP_JSF		0x3727
+#define PCI_DEVICE_ID_INTEL_NTB_RP_SNB		0x3C08
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_SNB		0x3C0D
+#define PCI_DEVICE_ID_INTEL_NTB_CLASSIC_SNB	0x3C0E
+#define PCI_DEVICE_ID_INTEL_NTB_2ND_SNB		0x3C0F
+#define PCI_DEVICE_ID_INTEL_NTB_B2B_BWD		0x0C4E
+
+#define msix_table_size(control)	((control & PCI_MSIX_FLAGS_QSIZE)+1)
+
+#define NTB_BAR_MMIO		0
+#define NTB_BAR_23		2
+#define NTB_BAR_45		4
+#define NTB_BAR_MASK		((1 << NTB_BAR_MMIO) | (1 << NTB_BAR_23) |\
+				 (1 << NTB_BAR_45))
+
+#define NTB_LINK_DOWN		0
+#define NTB_LINK_UP		1
+
+#define NTB_HB_TIMEOUT		msecs_to_jiffies(1000)
+
+#define NTB_NUM_MW		2
+
+enum ntb_hw_event {
+	NTB_EVENT_SW_EVENT0 = 0,
+	NTB_EVENT_SW_EVENT1,
+	NTB_EVENT_SW_EVENT2,
+	NTB_EVENT_HW_ERROR,
+	NTB_EVENT_HW_LINK_UP,
+	NTB_EVENT_HW_LINK_DOWN,
+};
+
+struct ntb_mw {
+	dma_addr_t phys_addr;
+	void __iomem *vbase;
+	resource_size_t bar_sz;
+};
+
+struct ntb_db_cb {
+	void (*callback) (void *data, int db_num);
+	unsigned int db_num;
+	void *data;
+	struct ntb_device *ndev;
+};
+
+struct ntb_device {
+	struct pci_dev *pdev;
+	struct msix_entry *msix_entries;
+	void __iomem *reg_base;
+	struct ntb_mw mw[NTB_NUM_MW];
+	struct {
+		unsigned int max_spads;
+		unsigned int max_db_bits;
+		unsigned int msix_cnt;
+	} limits;
+	struct {
+		void __iomem *pdb;
+		void __iomem *pdb_mask;
+		void __iomem *sdb;
+		void __iomem *sbar2_xlat;
+		void __iomem *sbar4_xlat;
+		void __iomem *spad_write;
+		void __iomem *spad_read;
+		void __iomem *lnk_cntl;
+		void __iomem *lnk_stat;
+		void __iomem *spci_cmd;
+	} reg_ofs;
+	struct ntb_transport *ntb_transport;
+	void (*event_cb)(void *handle, enum ntb_hw_event event);
+
+	struct ntb_db_cb *db_cb;
+	unsigned char hw_type;
+	unsigned char conn_type;
+	unsigned char dev_type;
+	unsigned char num_msix;
+	unsigned char bits_per_vector;
+	unsigned char max_cbs;
+	unsigned char link_status;
+	struct delayed_work hb_timer;
+	unsigned long last_ts;
+};
+
+/**
+ * ntb_hw_link_status() - return the hardware link status
+ * @ndev: pointer to ntb_device instance
+ *
+ * Returns true if the hardware is connected to the remote system
+ *
+ * RETURNS: true or false based on the hardware link state
+ */
+static inline bool ntb_hw_link_status(struct ntb_device *ndev)
+{
+	return ndev->link_status == NTB_LINK_UP;
+}
+
+/**
+ * ntb_query_pdev() - return the pci_dev pointer
+ * @ndev: pointer to ntb_device instance
+ *
+ * Given the ntb device pointer, return the pci_dev pointer for the NTB hardware device
+ *
+ * RETURNS: a pointer to the ntb pci_dev
+ */
+static inline struct pci_dev *ntb_query_pdev(struct ntb_device *ndev)
+{
+	return ndev->pdev;
+}
+
+struct ntb_device *ntb_register_transport(struct pci_dev *pdev,
+					  void *transport);
+void ntb_unregister_transport(struct ntb_device *ndev);
+void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr);
+int ntb_register_db_callback(struct ntb_device *ndev, unsigned int idx,
+			     void *data, void (*db_cb_func) (void *data,
+							     int db_num));
+void ntb_unregister_db_callback(struct ntb_device *ndev, unsigned int idx);
+int ntb_register_event_callback(struct ntb_device *ndev,
+				void (*event_cb_func) (void *handle,
+						      enum ntb_hw_event event));
+void ntb_unregister_event_callback(struct ntb_device *ndev);
+int ntb_get_max_spads(struct ntb_device *ndev);
+int ntb_write_local_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_local_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+int ntb_write_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 val);
+int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val);
+void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw);
+resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw);
+void ntb_ring_sdb(struct ntb_device *ndev, unsigned int idx);
+void *ntb_find_transport(struct pci_dev *pdev);
+
+int ntb_transport_init(struct pci_dev *pdev);
+void ntb_transport_free(void *transport);
diff --git a/drivers/ntb/ntb_regs.h b/drivers/ntb/ntb_regs.h
new file mode 100644
index 0000000..5bfa8c0
--- /dev/null
+++ b/drivers/ntb/ntb_regs.h
@@ -0,0 +1,139 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+#define NTB_LINK_ENABLE		0x0000
+#define NTB_LINK_DISABLE	0x0002
+#define NTB_LINK_STATUS_ACTIVE	0x2000
+#define NTB_LINK_SPEED_MASK	0x000f
+#define NTB_LINK_WIDTH_MASK	0x03f0
+
+#define SNB_MSIX_CNT		4
+#define SNB_MAX_SPADS		16
+#define SNB_MAX_COMPAT_SPADS	8
+/* Reserve the uppermost bit for link interrupt */
+#define SNB_MAX_DB_BITS		15
+#define SNB_DB_BITS_PER_VEC	5
+
+#define SNB_DB_HW_LINK		0x8000
+
+#define SNB_PCICMD_OFFSET	0x0504
+#define SNB_DEVCTRL_OFFSET	0x0598
+#define SNB_LINK_STATUS_OFFSET	0x01A2
+
+#define SNB_PBAR2LMT_OFFSET	0x0000
+#define SNB_PBAR4LMT_OFFSET	0x0008
+#define SNB_PBAR2XLAT_OFFSET	0x0010
+#define SNB_PBAR4XLAT_OFFSET	0x0018
+#define SNB_SBAR2LMT_OFFSET	0x0020
+#define SNB_SBAR4LMT_OFFSET	0x0028
+#define SNB_SBAR2XLAT_OFFSET	0x0030
+#define SNB_SBAR4XLAT_OFFSET	0x0038
+#define SNB_SBAR0BASE_OFFSET	0x0040
+#define SNB_SBAR2BASE_OFFSET	0x0048
+#define SNB_SBAR4BASE_OFFSET	0x0050
+#define SNB_NTBCNTL_OFFSET	0x0058
+#define SNB_SBDF_OFFSET		0x005C
+#define SNB_PDOORBELL_OFFSET	0x0060
+#define SNB_PDBMSK_OFFSET	0x0062
+#define SNB_SDOORBELL_OFFSET	0x0064
+#define SNB_SDBMSK_OFFSET	0x0066
+#define SNB_USMEMMISS		0x0070
+#define SNB_SPAD_OFFSET		0x0080
+#define SNB_SPADSEMA4_OFFSET	0x00c0
+#define SNB_WCCNTRL_OFFSET	0x00e0
+#define SNB_B2B_SPAD_OFFSET	0x0100
+#define SNB_B2B_DOORBELL_OFFSET	0x0140
+#define SNB_B2B_XLAT_OFFSET	0x0144
+
+#define BWD_MSIX_CNT		34
+#define BWD_MAX_SPADS		16
+#define BWD_MAX_COMPAT_SPADS	16
+#define BWD_MAX_DB_BITS		34
+#define BWD_DB_BITS_PER_VEC	1
+
+#define BWD_PCICMD_OFFSET	0xb004
+#define BWD_MBAR23_OFFSET	0xb018
+#define BWD_MBAR45_OFFSET	0xb020
+#define BWD_DEVCTRL_OFFSET	0xb048
+#define BWD_LINK_STATUS_OFFSET	0xb052
+
+#define BWD_SBAR2XLAT_OFFSET	0x0008
+#define BWD_SBAR4XLAT_OFFSET	0x0010
+#define BWD_PDOORBELL_OFFSET	0x0020
+#define BWD_PDBMSK_OFFSET	0x0028
+#define BWD_NTBCNTL_OFFSET	0x0060
+#define BWD_EBDF_OFFSET		0x0064
+#define BWD_SPAD_OFFSET		0x0080
+#define BWD_SPADSEMA_OFFSET	0x00c0
+#define BWD_STKYSPAD_OFFSET	0x00c4
+#define BWD_PBAR2XLAT_OFFSET	0x8008
+#define BWD_PBAR4XLAT_OFFSET	0x8010
+#define BWD_B2B_DOORBELL_OFFSET	0x8020
+#define BWD_B2B_SPAD_OFFSET	0x8080
+#define BWD_B2B_SPADSEMA_OFFSET	0x80c0
+#define BWD_B2B_STKYSPAD_OFFSET	0x80c4
+
+#define NTB_CNTL_BAR23_SNOOP	(1 << 2)
+#define NTB_CNTL_BAR45_SNOOP	(1 << 6)
+#define BWD_CNTL_LINK_DOWN	(1 << 16)
+
+#define NTB_PPD_OFFSET		0x00D4
+#define SNB_PPD_CONN_TYPE	0x0003
+#define SNB_PPD_DEV_TYPE	0x0010
+#define BWD_PPD_INIT_LINK	0x0008
+#define BWD_PPD_CONN_TYPE	0x0300
+#define BWD_PPD_DEV_TYPE	0x1000
+
+#define BWD_PBAR2XLAT_USD_ADDR	0x0000004000000000
+#define BWD_PBAR4XLAT_USD_ADDR	0x0000008000000000
+#define BWD_MBAR23_USD_ADDR	0x000000410000000C
+#define BWD_MBAR45_USD_ADDR	0x000000810000000C
+#define BWD_PBAR2XLAT_DSD_ADDR	0x0000004100000000
+#define BWD_PBAR4XLAT_DSD_ADDR	0x0000008100000000
+#define BWD_MBAR23_DSD_ADDR	0x000000400000000C
+#define BWD_MBAR45_DSD_ADDR	0x000000800000000C
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
new file mode 100644
index 0000000..e0bdfd7
--- /dev/null
+++ b/drivers/ntb/ntb_transport.c
@@ -0,0 +1,1441 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/ntb.h>
+#include "ntb_hw.h"
+
+#define NTB_TRANSPORT_VERSION	2
+
+static unsigned int transport_mtu = 0x401E;
+module_param(transport_mtu, uint, 0644);
+MODULE_PARM_DESC(transport_mtu, "Maximum size of NTB transport packets");
+
+static unsigned char max_num_clients = 2;
+module_param(max_num_clients, byte, 0644);
+MODULE_PARM_DESC(max_num_clients, "Maximum number of NTB transport clients");
+
+struct ntb_queue_entry {
+	/* ntb_queue list reference */
+	struct list_head entry;
+	/* pointers to data to be transferred */
+	void *cb_data;
+	void *buf;
+	unsigned int len;
+	unsigned int flags;
+};
+
+struct ntb_rx_info {
+	unsigned int entry;
+};
+
+struct ntb_transport_qp {
+	struct ntb_transport *transport;
+	struct ntb_device *ndev;
+	void *cb_data;
+
+	bool client_ready;
+	bool qp_link;
+	u8 qp_num;	/* Only 64 QP's are allowed.  0-63 */
+
+	struct ntb_rx_info __iomem *rx_info;
+	struct ntb_rx_info *remote_rx_info;
+
+	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+			    void *data, int len);
+	struct list_head tx_free_q;
+	spinlock_t ntb_tx_free_q_lock;
+	void __iomem *tx_mw;
+	unsigned int tx_index;
+	unsigned int tx_max_entry;
+	unsigned int tx_max_frame;
+
+	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+			    void *data, int len);
+	struct tasklet_struct rx_work;
+	struct list_head rx_pend_q;
+	struct list_head rx_free_q;
+	spinlock_t ntb_rx_pend_q_lock;
+	spinlock_t ntb_rx_free_q_lock;
+	void *rx_buff;
+	unsigned int rx_index;
+	unsigned int rx_max_entry;
+	unsigned int rx_max_frame;
+
+	void (*event_handler) (void *data, int status);
+	struct delayed_work link_work;
+	struct work_struct link_cleanup;
+
+	struct dentry *debugfs_dir;
+	struct dentry *debugfs_stats;
+
+	/* Stats */
+	u64 rx_bytes;
+	u64 rx_pkts;
+	u64 rx_ring_empty;
+	u64 rx_err_no_buf;
+	u64 rx_err_oflow;
+	u64 rx_err_ver;
+	u64 tx_bytes;
+	u64 tx_pkts;
+	u64 tx_ring_full;
+};
+
+struct ntb_transport_mw {
+	size_t size;
+	void *virt_addr;
+	dma_addr_t dma_addr;
+};
+
+struct ntb_transport_client_dev {
+	struct list_head entry;
+	struct device dev;
+};
+
+struct ntb_transport {
+	struct list_head entry;
+	struct list_head client_devs;
+
+	struct ntb_device *ndev;
+	struct ntb_transport_mw mw[NTB_NUM_MW];
+	struct ntb_transport_qp *qps;
+	unsigned int max_qps;
+	unsigned long qp_bitmap;
+	bool transport_link;
+	struct delayed_work link_work;
+	struct work_struct link_cleanup;
+	struct dentry *debugfs_dir;
+};
+
+enum {
+	DESC_DONE_FLAG = 1 << 0,
+	LINK_DOWN_FLAG = 1 << 1,
+};
+
+struct ntb_payload_header {
+	unsigned int ver;
+	unsigned int len;
+	unsigned int flags;
+};
+
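+/* Scratchpad register indices used to exchange setup info with the peer */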
+enum {
+	VERSION = 0,
+	MW0_SZ,
+	MW1_SZ,
+	NUM_QPS,
+	QP_LINKS,
+	MAX_SPAD,
+};
+
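+/* Queue pairs are striped round-robin across the available memory windows */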
+#define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
+#define NTB_QP_DEF_NUM_ENTRIES	100
+#define NTB_LINK_DOWN_TIMEOUT	10
+
+static int ntb_match_bus(struct device *dev, struct device_driver *drv)
+{
+	return !strncmp(dev_name(dev), drv->name, strlen(drv->name));
+}
+
+static int ntb_client_probe(struct device *dev)
+{
+	const struct ntb_client *drv = container_of(dev->driver,
+						    struct ntb_client, driver);
+	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+	int rc = -EINVAL;
+
+	get_device(dev);
+	if (drv && drv->probe)
+		rc = drv->probe(pdev);
+	if (rc)
+		put_device(dev);
+
+	return rc;
+}
+
+static int ntb_client_remove(struct device *dev)
+{
+	const struct ntb_client *drv = container_of(dev->driver,
+						    struct ntb_client, driver);
+	struct pci_dev *pdev = container_of(dev->parent, struct pci_dev, dev);
+
+	if (drv && drv->remove)
+		drv->remove(pdev);
+
+	put_device(dev);
+
+	return 0;
+}
+
+static struct bus_type ntb_bus_type = {
+	.name = "ntb_bus",
+	.match = ntb_match_bus,
+	.probe = ntb_client_probe,
+	.remove = ntb_client_remove,
+};
+
+static LIST_HEAD(ntb_transport_list);
+
+static int ntb_bus_init(struct ntb_transport *nt)
+{
+	if (list_empty(&ntb_transport_list)) {
+		int rc = bus_register(&ntb_bus_type);
+		if (rc)
+			return rc;
+	}
+
+	list_add(&nt->entry, &ntb_transport_list);
+
+	return 0;
+}
+
+static void ntb_bus_remove(struct ntb_transport *nt)
+{
+	struct ntb_transport_client_dev *client_dev, *cd;
+
+	list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) {
+		dev_err(client_dev->dev.parent, "%s still attached to bus, removing\n",
+			dev_name(&client_dev->dev));
+		list_del(&client_dev->entry);
+		device_unregister(&client_dev->dev);
+	}
+
+	list_del(&nt->entry);
+
+	if (list_empty(&ntb_transport_list))
+		bus_unregister(&ntb_bus_type);
+}
+
+static void ntb_client_release(struct device *dev)
+{
+	struct ntb_transport_client_dev *client_dev;
+	client_dev = container_of(dev, struct ntb_transport_client_dev, dev);
+
+	kfree(client_dev);
+}
+
+/**
+ * ntb_unregister_client_dev - Unregister NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Unregister an NTB client device with the NTB transport layer
+ */
+void ntb_unregister_client_dev(char *device_name)
+{
+	struct ntb_transport_client_dev *client, *cd;
+	struct ntb_transport *nt;
+
+	list_for_each_entry(nt, &ntb_transport_list, entry)
+		list_for_each_entry_safe(client, cd, &nt->client_devs, entry)
+			if (!strncmp(dev_name(&client->dev), device_name,
+				     strlen(device_name))) {
+				list_del(&client->entry);
+				device_unregister(&client->dev);
+			}
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client_dev);
+
+/**
+ * ntb_register_client_dev - Register NTB client device
+ * @device_name: Name of NTB client device
+ *
+ * Register an NTB client device with the NTB transport layer
+ */
+int ntb_register_client_dev(char *device_name)
+{
+	struct ntb_transport_client_dev *client_dev;
+	struct ntb_transport *nt;
+	int rc;
+
+	if (list_empty(&ntb_transport_list))
+		return -ENODEV;
+
+	list_for_each_entry(nt, &ntb_transport_list, entry) {
+		struct device *dev;
+
+		client_dev = kzalloc(sizeof(struct ntb_transport_client_dev),
+				     GFP_KERNEL);
+		if (!client_dev) {
+			rc = -ENOMEM;
+			goto err;
+		}
+
+		dev = &client_dev->dev;
+
+		/* setup and register client devices */
+		dev_set_name(dev, "%s", device_name);
+		dev->bus = &ntb_bus_type;
+		dev->release = ntb_client_release;
+		dev->parent = &ntb_query_pdev(nt->ndev)->dev;
+
+		rc = device_register(dev);
+		if (rc) {
+			kfree(client_dev);
+			goto err;
+		}
+
+		list_add_tail(&client_dev->entry, &nt->client_devs);
+	}
+
+	return 0;
+
+err:
+	ntb_unregister_client_dev(device_name);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_register_client_dev);
+
+/**
+ * ntb_register_client - Register NTB client driver
+ * @drv: NTB client driver to be registered
+ *
+ * Register an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_register_client(struct ntb_client *drv)
+{
+	drv->driver.bus = &ntb_bus_type;
+
+	if (list_empty(&ntb_transport_list))
+		return -ENODEV;
+
+	return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_register_client);
+
+/**
+ * ntb_unregister_client - Unregister NTB client driver
+ * @drv: NTB client driver to be unregistered
+ *
+ * Unregister an NTB client driver with the NTB transport layer
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+void ntb_unregister_client(struct ntb_client *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(ntb_unregister_client);
+
+static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
+			    loff_t *offp)
+{
+	struct ntb_transport_qp *qp;
+	char *buf;
+	ssize_t ret, out_offset, out_count;
+
+	out_count = 600;
+
+	buf = kmalloc(out_count, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	qp = filp->private_data;
+	out_offset = 0;
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "NTB QP stats\n");
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_bytes - \t%llu\n", qp->rx_bytes);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_pkts - \t%llu\n", qp->rx_pkts);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_ring_empty - %llu\n", qp->rx_ring_empty);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_err_no_buf - %llu\n", qp->rx_err_no_buf);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_err_oflow - \t%llu\n", qp->rx_err_oflow);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_err_ver - \t%llu\n", qp->rx_err_ver);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_buff - \t%p\n", qp->rx_buff);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_index - \t%u\n", qp->rx_index);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "rx_max_entry - \t%u\n", qp->rx_max_entry);
+
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_bytes - \t%llu\n", qp->tx_bytes);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_pkts - \t%llu\n", qp->tx_pkts);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_ring_full - \t%llu\n", qp->tx_ring_full);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_mw - \t%p\n", qp->tx_mw);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_index - \t%u\n", qp->tx_index);
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "tx_max_entry - \t%u\n", qp->tx_max_entry);
+
+	out_offset += snprintf(buf + out_offset, out_count - out_offset,
+			       "\nQP Link %s\n", (qp->qp_link == NTB_LINK_UP) ?
+			       "Up" : "Down");
+	if (out_offset > out_count)
+		out_offset = out_count;
+
+	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
+	kfree(buf);
+	return ret;
+}
+
+static const struct file_operations ntb_qp_debugfs_stats = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = debugfs_read,
+};
+
+static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
+			 struct list_head *list)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+	list_add_tail(entry, list);
+	spin_unlock_irqrestore(lock, flags);
+}
+
+static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
+						struct list_head *list)
+{
+	struct ntb_queue_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(lock, flags);
+	if (list_empty(list)) {
+		entry = NULL;
+		goto out;
+	}
+	entry = list_first_entry(list, struct ntb_queue_entry, entry);
+	list_del(&entry->entry);
+out:
+	spin_unlock_irqrestore(lock, flags);
+
+	return entry;
+}
+
+static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
+				      unsigned int qp_num)
+{
+	struct ntb_transport_qp *qp = &nt->qps[qp_num];
+	unsigned int rx_size, num_qps_mw;
+	u8 mw_num = QP_TO_MW(qp_num);
+	unsigned int i;
+
+	WARN_ON(nt->mw[mw_num].virt_addr == NULL);
+
+	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+	else
+		num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+	rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
+	qp->remote_rx_info = nt->mw[mw_num].virt_addr +
+			     (qp_num / NTB_NUM_MW * rx_size);
+	rx_size -= sizeof(struct ntb_rx_info);
+
+	qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info);
+	qp->rx_max_frame = min(transport_mtu, rx_size);
+	qp->rx_max_entry = rx_size / qp->rx_max_frame;
+	qp->rx_index = 0;
+
+	qp->remote_rx_info->entry = qp->rx_max_entry;
+
+	/* setup the hdr offsets with 0's */
+	for (i = 0; i < qp->rx_max_entry; i++) {
+		void *offset = qp->rx_buff + qp->rx_max_frame * (i + 1) -
+			       sizeof(struct ntb_payload_header);
+		memset(offset, 0, sizeof(struct ntb_payload_header));
+	}
+
+	qp->rx_pkts = 0;
+	qp->tx_pkts = 0;
+}
+
+static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
+{
+	struct ntb_transport_mw *mw = &nt->mw[num_mw];
+	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+	/* Alloc memory for receiving data.  Must be 4k aligned */
+	mw->size = ALIGN(size, 4096);
+
+	mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
+					   GFP_KERNEL);
+	if (!mw->virt_addr) {
+		dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
+		       (int) mw->size);
+		return -ENOMEM;
+	}
+
+	/* Notify HW the memory location of the receive buffer */
+	ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr);
+
+	return 0;
+}
+
+static void ntb_qp_link_cleanup(struct work_struct *work)
+{
+	struct ntb_transport_qp *qp = container_of(work,
+						   struct ntb_transport_qp,
+						   link_cleanup);
+	struct ntb_transport *nt = qp->transport;
+	struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
+
+	if (qp->qp_link == NTB_LINK_DOWN) {
+		cancel_delayed_work_sync(&qp->link_work);
+		return;
+	}
+
+	if (qp->event_handler)
+		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
+
+	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+	qp->qp_link = NTB_LINK_DOWN;
+
+	if (nt->transport_link == NTB_LINK_UP)
+		schedule_delayed_work(&qp->link_work,
+				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_down(struct ntb_transport_qp *qp)
+{
+	schedule_work(&qp->link_cleanup);
+}
+
+static void ntb_transport_link_cleanup(struct work_struct *work)
+{
+	struct ntb_transport *nt = container_of(work, struct ntb_transport,
+						link_cleanup);
+	int i;
+
+	if (nt->transport_link == NTB_LINK_DOWN)
+		cancel_delayed_work_sync(&nt->link_work);
+	else
+		nt->transport_link = NTB_LINK_DOWN;
+
+	/* Pass along the info to any clients */
+	for (i = 0; i < nt->max_qps; i++)
+		if (!test_bit(i, &nt->qp_bitmap))
+			ntb_qp_link_down(&nt->qps[i]);
+
+	/* The scratchpad registers keep the values if the remote side
+	 * goes down, blast them now to give them a sane value the next
+	 * time they are accessed
+	 */
+	for (i = 0; i < MAX_SPAD; i++)
+		ntb_write_local_spad(nt->ndev, i, 0);
+}
+
+static void ntb_transport_event_callback(void *data, enum ntb_hw_event event)
+{
+	struct ntb_transport *nt = data;
+
+	switch (event) {
+	case NTB_EVENT_HW_LINK_UP:
+		schedule_delayed_work(&nt->link_work, 0);
+		break;
+	case NTB_EVENT_HW_LINK_DOWN:
+		schedule_work(&nt->link_cleanup);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void ntb_transport_link_work(struct work_struct *work)
+{
+	struct ntb_transport *nt = container_of(work, struct ntb_transport,
+						link_work.work);
+	struct ntb_device *ndev = nt->ndev;
+	struct pci_dev *pdev = ntb_query_pdev(ndev);
+	u32 val;
+	int rc, i;
+
+	/* send the local info */
+	rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
+	if (rc) {
+		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+			NTB_TRANSPORT_VERSION, VERSION);
+		goto out;
+	}
+
+	rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0));
+	if (rc) {
+		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+			(u32) ntb_get_mw_size(ndev, 0), MW0_SZ);
+		goto out;
+	}
+
+	rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1));
+	if (rc) {
+		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+			(u32) ntb_get_mw_size(ndev, 1), MW1_SZ);
+		goto out;
+	}
+
+	rc = ntb_write_remote_spad(ndev, NUM_QPS, nt->max_qps);
+	if (rc) {
+		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+			nt->max_qps, NUM_QPS);
+		goto out;
+	}
+
+	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+		goto out;
+	}
+
+	rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+			val, QP_LINKS);
+		goto out;
+	}
+
+	/* Query the remote side for its info */
+	rc = ntb_read_remote_spad(ndev, VERSION, &val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error reading remote spad %d\n", VERSION);
+		goto out;
+	}
+
+	if (val != NTB_TRANSPORT_VERSION)
+		goto out;
+	dev_dbg(&pdev->dev, "Remote version = %d\n", val);
+
+	rc = ntb_read_remote_spad(ndev, NUM_QPS, &val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_QPS);
+		goto out;
+	}
+
+	if (val != nt->max_qps)
+		goto out;
+	dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
+
+	rc = ntb_read_remote_spad(ndev, MW0_SZ, &val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ);
+		goto out;
+	}
+
+	if (!val)
+		goto out;
+	dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val);
+
+	rc = ntb_set_mw(nt, 0, val);
+	if (rc)
+		goto out;
+
+	rc = ntb_read_remote_spad(ndev, MW1_SZ, &val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ);
+		goto out;
+	}
+
+	if (!val)
+		goto out;
+	dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
+
+	rc = ntb_set_mw(nt, 1, val);
+	if (rc)
+		goto out;
+
+	nt->transport_link = NTB_LINK_UP;
+
+	for (i = 0; i < nt->max_qps; i++) {
+		struct ntb_transport_qp *qp = &nt->qps[i];
+
+		ntb_transport_setup_qp_mw(nt, i);
+
+		if (qp->client_ready == NTB_LINK_UP)
+			schedule_delayed_work(&qp->link_work, 0);
+	}
+
+	return;
+
+out:
+	if (ntb_hw_link_status(ndev))
+		schedule_delayed_work(&nt->link_work,
+				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_qp_link_work(struct work_struct *work)
+{
+	struct ntb_transport_qp *qp = container_of(work,
+						   struct ntb_transport_qp,
+						   link_work.work);
+	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+	struct ntb_transport *nt = qp->transport;
+	int rc, val;
+
+	WARN_ON(nt->transport_link != NTB_LINK_UP);
+
+	rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+		return;
+	}
+
+	rc = ntb_write_remote_spad(nt->ndev, QP_LINKS, val | 1 << qp->qp_num);
+	if (rc)
+		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+			val | 1 << qp->qp_num, QP_LINKS);
+
+	/* query remote spad for qp ready bits */
+	rc = ntb_read_remote_spad(nt->ndev, QP_LINKS, &val);
+	if (rc)
+		dev_err(&pdev->dev, "Error reading remote spad %d\n", QP_LINKS);
+
+	dev_dbg(&pdev->dev, "Remote QP link status = %x\n", val);
+
+	/* See if the remote side is up */
+	if (1 << qp->qp_num & val) {
+		qp->qp_link = NTB_LINK_UP;
+
+		dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
+		if (qp->event_handler)
+			qp->event_handler(qp->cb_data, NTB_LINK_UP);
+	} else if (nt->transport_link == NTB_LINK_UP)
+		schedule_delayed_work(&qp->link_work,
+				      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
+}
+
+static void ntb_transport_init_queue(struct ntb_transport *nt,
+				     unsigned int qp_num)
+{
+	struct ntb_transport_qp *qp;
+	unsigned int num_qps_mw, tx_size;
+	u8 mw_num = QP_TO_MW(qp_num);
+
+	qp = &nt->qps[qp_num];
+	qp->qp_num = qp_num;
+	qp->transport = nt;
+	qp->ndev = nt->ndev;
+	qp->qp_link = NTB_LINK_DOWN;
+	qp->client_ready = NTB_LINK_DOWN;
+	qp->event_handler = NULL;
+
+	if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
+		num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
+	else
+		num_qps_mw = nt->max_qps / NTB_NUM_MW;
+
+	tx_size = (unsigned int) ntb_get_mw_size(qp->ndev, mw_num) / num_qps_mw;
+	qp->rx_info = ntb_get_mw_vbase(nt->ndev, mw_num) +
+		      (qp_num / NTB_NUM_MW * tx_size);
+	tx_size -= sizeof(struct ntb_rx_info);
+
+	qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info);
+	qp->tx_max_frame = min(transport_mtu, tx_size);
+	qp->tx_max_entry = tx_size / qp->tx_max_frame;
+	qp->tx_index = 0;
+
+	if (nt->debugfs_dir) {
+		char debugfs_name[8];
+
+		snprintf(debugfs_name, sizeof(debugfs_name), "qp%d", qp_num);
+		qp->debugfs_dir = debugfs_create_dir(debugfs_name,
+						     nt->debugfs_dir);
+
+		qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
+							qp->debugfs_dir, qp,
+							&ntb_qp_debugfs_stats);
+	}
+
+	INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
+	INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup);
+
+	spin_lock_init(&qp->ntb_rx_pend_q_lock);
+	spin_lock_init(&qp->ntb_rx_free_q_lock);
+	spin_lock_init(&qp->ntb_tx_free_q_lock);
+
+	INIT_LIST_HEAD(&qp->rx_pend_q);
+	INIT_LIST_HEAD(&qp->rx_free_q);
+	INIT_LIST_HEAD(&qp->tx_free_q);
+}
+
+int ntb_transport_init(struct pci_dev *pdev)
+{
+	struct ntb_transport *nt;
+	int rc, i;
+
+	nt = kzalloc(sizeof(struct ntb_transport), GFP_KERNEL);
+	if (!nt)
+		return -ENOMEM;
+
+	if (debugfs_initialized())
+		nt->debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	else
+		nt->debugfs_dir = NULL;
+
+	nt->ndev = ntb_register_transport(pdev, nt);
+	if (!nt->ndev) {
+		rc = -EIO;
+		goto err;
+	}
+
+	nt->max_qps = min(nt->ndev->max_cbs, max_num_clients);
+
+	nt->qps = kcalloc(nt->max_qps, sizeof(struct ntb_transport_qp),
+			  GFP_KERNEL);
+	if (!nt->qps) {
+		rc = -ENOMEM;
+		goto err1;
+	}
+
+	nt->qp_bitmap = ((u64) 1 << nt->max_qps) - 1;
+
+	for (i = 0; i < nt->max_qps; i++)
+		ntb_transport_init_queue(nt, i);
+
+	INIT_DELAYED_WORK(&nt->link_work, ntb_transport_link_work);
+	INIT_WORK(&nt->link_cleanup, ntb_transport_link_cleanup);
+
+	rc = ntb_register_event_callback(nt->ndev,
+					 ntb_transport_event_callback);
+	if (rc)
+		goto err2;
+
+	INIT_LIST_HEAD(&nt->client_devs);
+	rc = ntb_bus_init(nt);
+	if (rc)
+		goto err3;
+
+	if (ntb_hw_link_status(nt->ndev))
+		schedule_delayed_work(&nt->link_work, 0);
+
+	return 0;
+
+err3:
+	ntb_unregister_event_callback(nt->ndev);
+err2:
+	kfree(nt->qps);
+err1:
+	ntb_unregister_transport(nt->ndev);
+err:
+	debugfs_remove_recursive(nt->debugfs_dir);
+	kfree(nt);
+	return rc;
+}
+
+void ntb_transport_free(void *transport)
+{
+	struct ntb_transport *nt = transport;
+	struct pci_dev *pdev;
+	int i;
+
+	nt->transport_link = NTB_LINK_DOWN;
+
+	/* free any queues that clients have not already freed */
+	for (i = 0; i < nt->max_qps; i++)
+		if (!test_bit(i, &nt->qp_bitmap))
+			ntb_transport_free_queue(&nt->qps[i]);
+
+	ntb_bus_remove(nt);
+
+	cancel_delayed_work_sync(&nt->link_work);
+
+	debugfs_remove_recursive(nt->debugfs_dir);
+
+	ntb_unregister_event_callback(nt->ndev);
+
+	pdev = ntb_query_pdev(nt->ndev);
+
+	for (i = 0; i < NTB_NUM_MW; i++)
+		if (nt->mw[i].virt_addr)
+			dma_free_coherent(&pdev->dev, nt->mw[i].size,
+					  nt->mw[i].virt_addr,
+					  nt->mw[i].dma_addr);
+
+	kfree(nt->qps);
+	ntb_unregister_transport(nt->ndev);
+	kfree(nt);
+}
+
+static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
+			     struct ntb_queue_entry *entry, void *offset)
+{
+	void *cb_data = entry->cb_data;
+	unsigned int len = entry->len;
+
+	memcpy(entry->buf, offset, entry->len);
+
+	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+	if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
+		qp->rx_handler(qp, qp->cb_data, cb_data, len);
+}
+
+static int ntb_process_rxc(struct ntb_transport_qp *qp)
+{
+	struct ntb_payload_header *hdr;
+	struct ntb_queue_entry *entry;
+	void *offset;
+
+	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
+	hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
+
+	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	if (!entry) {
+		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+			"no buffer - HDR ver %u, len %d, flags %x\n",
+			hdr->ver, hdr->len, hdr->flags);
+		qp->rx_err_no_buf++;
+		return -ENOMEM;
+	}
+
+	if (!(hdr->flags & DESC_DONE_FLAG)) {
+		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+			     &qp->rx_pend_q);
+		qp->rx_ring_empty++;
+		return -EAGAIN;
+	}
+
+	if (hdr->ver != (u32) qp->rx_pkts) {
+		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+			"qp %d: version mismatch, expected %llu - got %u\n",
+			qp->qp_num, qp->rx_pkts, hdr->ver);
+		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+			     &qp->rx_pend_q);
+		qp->rx_err_ver++;
+		return -EIO;
+	}
+
+	if (hdr->flags & LINK_DOWN_FLAG) {
+		ntb_qp_link_down(qp);
+
+		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+			     &qp->rx_pend_q);
+		goto out;
+	}
+
+	dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+		"rx offset %u, ver %u - %d payload received, buf size %d\n",
+		qp->rx_index, hdr->ver, hdr->len, entry->len);
+
+	if (hdr->len <= entry->len) {
+		entry->len = hdr->len;
+		ntb_rx_copy_task(qp, entry, offset);
+	} else {
+		ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry,
+			     &qp->rx_pend_q);
+
+		qp->rx_err_oflow++;
+		dev_dbg(&ntb_query_pdev(qp->ndev)->dev,
+			"RX overflow! Wanted %d got %d\n",
+			hdr->len, entry->len);
+	}
+
+	qp->rx_bytes += hdr->len;
+	qp->rx_pkts++;
+
+out:
+	/* Ensure that the data is fully copied out before clearing the flag */
+	wmb();
+	hdr->flags = 0;
+	iowrite32(qp->rx_index, &qp->rx_info->entry);
+
+	qp->rx_index++;
+	qp->rx_index %= qp->rx_max_entry;
+
+	return 0;
+}
+
+static void ntb_transport_rx(unsigned long data)
+{
+	struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
+	int rc;
+
+	do {
+		rc = ntb_process_rxc(qp);
+	} while (!rc);
+}
+
+static void ntb_transport_rxc_db(void *data, int db_num)
+{
+	struct ntb_transport_qp *qp = data;
+
+	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%s: doorbell %d received\n",
+		__func__, db_num);
+
+	tasklet_schedule(&qp->rx_work);
+}
+
+static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
+			     struct ntb_queue_entry *entry,
+			     void __iomem *offset)
+{
+	struct ntb_payload_header __iomem *hdr;
+
+	memcpy_toio(offset, entry->buf, entry->len);
+
+	hdr = offset + qp->tx_max_frame - sizeof(struct ntb_payload_header);
+	iowrite32(entry->len, &hdr->len);
+	iowrite32((u32) qp->tx_pkts, &hdr->ver);
+
+	/* Ensure that the data is fully copied out before setting the flag */
+	wmb();
+	iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags);
+
+	ntb_ring_sdb(qp->ndev, qp->qp_num);
+
+	/* The entry length can only be zero if the packet is intended to be a
+	 * "link down" or similar.  Since no payload is being sent in these
+	 * cases, there is nothing to add to the completion queue.
+	 */
+	if (entry->len > 0) {
+		qp->tx_bytes += entry->len;
+
+		if (qp->tx_handler)
+			qp->tx_handler(qp, qp->cb_data, entry->cb_data,
+				       entry->len);
+	}
+
+	ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q);
+}
+
+static int ntb_process_tx(struct ntb_transport_qp *qp,
+			  struct ntb_queue_entry *entry)
+{
+	void __iomem *offset;
+
+	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
+
+	dev_dbg(&ntb_query_pdev(qp->ndev)->dev, "%lld - offset %p, tx %u, entry len %d flags %x buff %p\n",
+		qp->tx_pkts, offset, qp->tx_index, entry->len, entry->flags,
+		entry->buf);
+	if (qp->tx_index == qp->remote_rx_info->entry) {
+		qp->tx_ring_full++;
+		return -EAGAIN;
+	}
+
+	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
+		if (qp->tx_handler)
+			qp->tx_handler(qp, qp->cb_data, NULL, -EIO);
+
+		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+			     &qp->tx_free_q);
+		return 0;
+	}
+
+	ntb_tx_copy_task(qp, entry, offset);
+
+	qp->tx_index++;
+	qp->tx_index %= qp->tx_max_entry;
+
+	qp->tx_pkts++;
+
+	return 0;
+}
+
+static void ntb_send_link_down(struct ntb_transport_qp *qp)
+{
+	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+	struct ntb_queue_entry *entry;
+	int i, rc;
+
+	if (qp->qp_link == NTB_LINK_DOWN)
+		return;
+
+	qp->qp_link = NTB_LINK_DOWN;
+	dev_info(&pdev->dev, "qp %d: Link Down\n", qp->qp_num);
+
+	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
+		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+		if (entry)
+			break;
+		msleep(100);
+	}
+
+	if (!entry)
+		return;
+
+	entry->cb_data = NULL;
+	entry->buf = NULL;
+	entry->len = 0;
+	entry->flags = LINK_DOWN_FLAG;
+
+	rc = ntb_process_tx(qp, entry);
+	if (rc)
+		dev_err(&pdev->dev, "ntb: QP%d unable to send linkdown msg\n",
+			qp->qp_num);
+}
+
+/**
+ * ntb_transport_create_queue - Create a new NTB transport layer queue
+ * @data: pointer passed back to the client's callbacks
+ * @pdev: PCI device of the NTB hardware to create the queue on
+ * @handlers: receive, transmit and event callback functions
+ *
+ * Create a new NTB transport layer queue and provide the queue with a callback
+ * routine for both transmit and receive.  The receive callback routine will be
+ * used to pass up data when the transport has received it on the queue.   The
+ * transmit callback routine will be called when the transport has completed the
+ * transmission of the data on the queue and the data is ready to be freed.
+ *
+ * RETURNS: pointer to the newly created ntb_transport_qp, or NULL on error.
+ */
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+			   const struct ntb_queue_handlers *handlers)
+{
+	struct ntb_queue_entry *entry;
+	struct ntb_transport_qp *qp;
+	struct ntb_transport *nt;
+	unsigned int free_queue;
+	int rc, i;
+
+	nt = ntb_find_transport(pdev);
+	if (!nt)
+		goto err;
+
+	free_queue = ffs(nt->qp_bitmap);
+	if (!free_queue)
+		goto err;
+
+	/* decrement free_queue to make it zero based */
+	free_queue--;
+
+	clear_bit(free_queue, &nt->qp_bitmap);
+
+	qp = &nt->qps[free_queue];
+	qp->cb_data = data;
+	qp->rx_handler = handlers->rx_handler;
+	qp->tx_handler = handlers->tx_handler;
+	qp->event_handler = handlers->event_handler;
+
+	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+		if (!entry)
+			goto err1;
+
+		ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry,
+			     &qp->rx_free_q);
+	}
+
+	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
+		entry = kzalloc(sizeof(struct ntb_queue_entry), GFP_ATOMIC);
+		if (!entry)
+			goto err2;
+
+		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+			     &qp->tx_free_q);
+	}
+
+	tasklet_init(&qp->rx_work, ntb_transport_rx, (unsigned long) qp);
+
+	rc = ntb_register_db_callback(qp->ndev, free_queue, qp,
+				      ntb_transport_rxc_db);
+	if (rc)
+		goto err3;
+
+	dev_info(&pdev->dev, "NTB Transport QP %d created\n", qp->qp_num);
+
+	return qp;
+
+err3:
+	tasklet_disable(&qp->rx_work);
+err2:
+	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+		kfree(entry);
+err1:
+	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+		kfree(entry);
+	set_bit(free_queue, &nt->qp_bitmap);
+err:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
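
A sketch of queue creation from a client's probe path.  The handler signatures below are
inferred from the call sites above (rx/tx handlers receive the qp, the client data, the
per-buffer cookie and a length; the event handler receives the client data and a link
status), and struct example_priv is a hypothetical client structure; treat this as an
illustration, not the authoritative header definition:

	static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
	{
		/* "data" is the buffer posted with ntb_transport_rx_enqueue(),
		 * "len" is the number of bytes received into it */
	}

	static void example_tx(struct ntb_transport_qp *qp, void *qp_data,
			       void *data, int len)
	{
		/* the buffer handed to ntb_transport_tx_enqueue() may now be
		 * reused or freed; a negative "len" indicates a send error */
	}

	static void example_event(void *data, int status)
	{
		/* status is NTB_LINK_UP or NTB_LINK_DOWN */
	}

	static const struct ntb_queue_handlers example_handlers = {
		.rx_handler	= example_rx,
		.tx_handler	= example_tx,
		.event_handler	= example_event,
	};

	static int example_setup_queue(struct example_priv *priv,
				       struct pci_dev *pdev)
	{
		/* "pdev" is the NTB PCI device the transport was set up on */
		priv->qp = ntb_transport_create_queue(priv, pdev,
						      &example_handlers);
		if (!priv->qp)
			return -EIO;
		return 0;
	}
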
+
+/**
+ * ntb_transport_free_queue - Frees NTB transport queue
+ * @qp: NTB queue to be freed
+ *
+ * Frees NTB transport queue
+ */
+void ntb_transport_free_queue(struct ntb_transport_qp *qp)
+{
+	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+	struct ntb_queue_entry *entry;
+
+	if (!qp)
+		return;
+
+	cancel_delayed_work_sync(&qp->link_work);
+
+	ntb_unregister_db_callback(qp->ndev, qp->qp_num);
+	tasklet_disable(&qp->rx_work);
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
+		kfree(entry);
+
+	while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) {
+		dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n");
+		kfree(entry);
+	}
+
+	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
+		kfree(entry);
+
+	set_bit(qp->qp_num, &qp->transport->qp_bitmap);
+
+	dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_free_queue);
+
+/**
+ * ntb_transport_rx_remove - Dequeues enqueued rx packet
+ * @qp: NTB transport layer queue the buffer is removed from
+ * @len: pointer to a variable that receives the length of the dequeued buffer
+ *
+ * Dequeues unused buffers from the receive queue.  Should only be used during
+ * shutdown of the qp.
+ *
+ * RETURNS: a pointer to the dequeued buffer, or NULL if none is available.
+ */
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
+{
+	struct ntb_queue_entry *entry;
+	void *buf;
+
+	if (!qp || qp->client_ready == NTB_LINK_UP)
+		return NULL;
+
+	entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
+	if (!entry)
+		return NULL;
+
+	buf = entry->cb_data;
+	*len = entry->len;
+
+	ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q);
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_remove);
+
+/**
+ * ntb_transport_rx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that incoming packets will be copied into
+ * @len: length of the data buffer
+ *
+ * Enqueue a new receive buffer onto the transport queue into which an NTB
+ * payload can be received.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+			     unsigned int len)
+{
+	struct ntb_queue_entry *entry;
+
+	if (!qp)
+		return -EINVAL;
+
+	entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->cb_data = cb;
+	entry->buf = data;
+	entry->len = len;
+
+	ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_rx_enqueue);
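
An illustrative helper for posting an initial pool of receive buffers before the link is
brought up; EXAMPLE_NUM_RX_BUFS and EXAMPLE_RX_BUF_LEN are placeholder constants chosen
for this sketch and are not defined by the patch:

	static int example_post_rx_buffers(struct ntb_transport_qp *qp)
	{
		unsigned int i;
		int rc;

		for (i = 0; i < EXAMPLE_NUM_RX_BUFS; i++) {
			void *buf = kmalloc(EXAMPLE_RX_BUF_LEN, GFP_KERNEL);

			if (!buf)
				return -ENOMEM;
			/* use the buffer itself as the cookie so it can later
			 * be reclaimed through ntb_transport_rx_remove() */
			rc = ntb_transport_rx_enqueue(qp, buf, buf,
						      EXAMPLE_RX_BUF_LEN);
			if (rc) {
				kfree(buf);
				return rc;
			}
		}
		return 0;
	}
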
+
+/**
+ * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
+ * @qp: NTB transport layer queue the entry is to be enqueued on
+ * @cb: per buffer pointer for callback function to use
+ * @data: pointer to data buffer that will be sent
+ * @len: length of the data buffer
+ *
+ * Enqueue a new transmit buffer onto the transport queue from which an NTB
+ * payload will be transmitted.  This assumes that a lock is being held to
+ * serialize access to the qp.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+			     unsigned int len)
+{
+	struct ntb_queue_entry *entry;
+	int rc;
+
+	if (!qp || qp->qp_link != NTB_LINK_UP || !len)
+		return -EINVAL;
+
+	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
+	if (!entry)
+		return -ENOMEM;
+
+	entry->cb_data = cb;
+	entry->buf = data;
+	entry->len = len;
+	entry->flags = 0;
+
+	rc = ntb_process_tx(qp, entry);
+	if (rc)
+		ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry,
+			     &qp->tx_free_q);
+
+	return rc;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_tx_enqueue);
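
A hedged sketch of the transmit side.  The client-side spinlock and struct example_priv
stand in for whatever serialization and state the caller provides; the error values
follow from the code above (-EINVAL for a down link or bad arguments, -ENOMEM when no
free entry exists, -EAGAIN when the remote ring is full):

	static int example_send(struct example_priv *priv, void *buf,
				unsigned int len)
	{
		unsigned long flags;
		int rc;

		if (len > ntb_transport_max_size(priv->qp))
			return -EMSGSIZE;

		/* caller-provided serialization of the qp (assumption) */
		spin_lock_irqsave(&priv->tx_lock, flags);
		rc = ntb_transport_tx_enqueue(priv->qp, buf, buf, len);
		spin_unlock_irqrestore(&priv->tx_lock, flags);

		/* -ENOMEM / -EAGAIN: back off and retry once entries complete */
		return rc;
	}
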
+
+/**
+ * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
+ * @qp: NTB transport layer queue to be enabled
+ *
+ * Notify NTB transport layer of client readiness to use queue
+ */
+void ntb_transport_link_up(struct ntb_transport_qp *qp)
+{
+	if (!qp)
+		return;
+
+	qp->client_ready = NTB_LINK_UP;
+
+	if (qp->transport->transport_link == NTB_LINK_UP)
+		schedule_delayed_work(&qp->link_work, 0);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_up);
+
+/**
+ * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
+ * @qp: NTB transport layer queue to be disabled
+ *
+ * Notify the NTB transport layer of the client's desire to no longer receive
+ * data on the specified transport queue.  It is the client's responsibility to
+ * ensure all entries on the queue are purged or otherwise handled appropriately.
+ */
+void ntb_transport_link_down(struct ntb_transport_qp *qp)
+{
+	struct pci_dev *pdev = ntb_query_pdev(qp->ndev);
+	int rc, val;
+
+	if (!qp)
+		return;
+
+	qp->client_ready = NTB_LINK_DOWN;
+
+	rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
+	if (rc) {
+		dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
+		return;
+	}
+
+	rc = ntb_write_remote_spad(qp->ndev, QP_LINKS,
+				   val & ~(1 << qp->qp_num));
+	if (rc)
+		dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
+			val & ~(1 << qp->qp_num), QP_LINKS);
+
+	if (qp->qp_link == NTB_LINK_UP)
+		ntb_send_link_down(qp);
+	else
+		cancel_delayed_work_sync(&qp->link_work);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_down);
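
A brief tear-down sketch tying the link and queue calls together.  Bring-up is simply
ntb_transport_link_up(qp) once receive buffers have been posted; on the way down the
pending buffers are reclaimed before the queue is freed.  The kfree() here assumes the
buffer-as-cookie convention from the rx_enqueue example above, which is an assumption of
this illustration rather than a requirement of the API:

	static void example_teardown(struct ntb_transport_qp *qp)
	{
		unsigned int len;
		void *buf;

		ntb_transport_link_down(qp);
		while ((buf = ntb_transport_rx_remove(qp, &len)) != NULL)
			kfree(buf);
		ntb_transport_free_queue(qp);
	}
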
+
+/**
+ * ntb_transport_link_query - Query transport link state
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query connectivity to the remote system of the NTB transport queue
+ *
+ * RETURNS: true for link up or false for link down
+ */
+bool ntb_transport_link_query(struct ntb_transport_qp *qp)
+{
+	return qp->qp_link == NTB_LINK_UP;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_link_query);
+
+/**
+ * ntb_transport_qp_num - Query the qp number
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query qp number of the NTB transport queue
+ *
+ * RETURNS: a zero based number specifying the qp number
+ */
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
+{
+	return qp->qp_num;
+}
+EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
+
+/**
+ * ntb_transport_max_size - Query the max payload size of a qp
+ * @qp: NTB transport layer queue to be queried
+ *
+ * Query the maximum payload size permissible on the given qp
+ *
+ * RETURNS: the max payload size of a qp
+ */
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
+{
+	return qp->tx_max_frame - sizeof(struct ntb_payload_header);
+}
+EXPORT_SYMBOL_GPL(ntb_transport_max_size);
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 3578e1c..519c4d6 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -133,8 +133,6 @@
 		goto err_out_free_res;
 	}
 
-	pci_set_drvdata(dev, &sockets[i].socket);
-
 	for (i = 0; i<socket_count; i++) {
 		sockets[i].socket.dev.parent = &dev->dev;
 		sockets[i].socket.ops = &i82092aa_operations;
@@ -164,14 +162,14 @@
 
 static void i82092aa_pci_remove(struct pci_dev *dev)
 {
-	struct pcmcia_socket *socket = pci_get_drvdata(dev);
+	int i;
 
 	enter("i82092aa_pci_remove");
 	
 	free_irq(dev->irq, i82092aa_interrupt);
 
-	if (socket)
-		pcmcia_unregister_socket(socket);
+	for (i = 0; i < socket_count; i++)
+		pcmcia_unregister_socket(&sockets[i].socket);
 
 	leave("i82092aa_pci_remove");
 }
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index 75806be..d98a086 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -246,6 +246,7 @@
 	socket = &vrc4171_sockets[slot];
 	socket->csc_irq = search_nonuse_irq();
 	socket->io_irq = search_nonuse_irq();
+	spin_lock_init(&socket->lock);
 
 	return 0;
 }
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 0144078..270b3cf 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1410,13 +1410,13 @@
 
 static const struct hv_vmbus_device_id id_table[] = {
 	/* SCSI guid */
-	{ VMBUS_DEVICE(0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
-		       0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f)
-	  .driver_data = SCSI_GUID },
+	{ HV_SCSI_GUID,
+	  .driver_data = SCSI_GUID
+	},
 	/* IDE guid */
-	{ VMBUS_DEVICE(0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
-		       0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5)
-	  .driver_data = IDE_GUID },
+	{ HV_IDE_GUID,
+	  .driver_data = IDE_GUID
+	},
 	{ },
 };
 
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 95a9f71..5e6c7d7 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1376,6 +1376,7 @@
 	return 0;
 
 err_reg:
+	put_device(&vdev->dev);
 	kfree(vdev);
 err_devalloc:
 	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index 7c294f4..96cab6a 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -13,6 +13,7 @@
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
@@ -459,43 +460,34 @@
 	if (!pdev)
 		return -ENODEV;
 
-	ds1wm_data = kzalloc(sizeof(*ds1wm_data), GFP_KERNEL);
+	ds1wm_data = devm_kzalloc(&pdev->dev, sizeof(*ds1wm_data), GFP_KERNEL);
 	if (!ds1wm_data)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, ds1wm_data);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		ret = -ENXIO;
-		goto err0;
-	}
-	ds1wm_data->map = ioremap(res->start, resource_size(res));
-	if (!ds1wm_data->map) {
-		ret = -ENOMEM;
-		goto err0;
-	}
+	if (!res)
+		return -ENXIO;
+	ds1wm_data->map = devm_ioremap(&pdev->dev, res->start,
+				       resource_size(res));
+	if (!ds1wm_data->map)
+		return -ENOMEM;
 
 	/* calculate bus shift from mem resource */
 	ds1wm_data->bus_shift = resource_size(res) >> 3;
 
 	ds1wm_data->pdev = pdev;
 	ds1wm_data->cell = mfd_get_cell(pdev);
-	if (!ds1wm_data->cell) {
-		ret = -ENODEV;
-		goto err1;
-	}
+	if (!ds1wm_data->cell)
+		return -ENODEV;
 	plat = pdev->dev.platform_data;
-	if (!plat) {
-		ret = -ENODEV;
-		goto err1;
-	}
+	if (!plat)
+		return -ENODEV;
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!res) {
-		ret = -ENXIO;
-		goto err1;
-	}
+	if (!res)
+		return -ENXIO;
 	ds1wm_data->irq = res->start;
 	ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
 	ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
@@ -505,10 +497,10 @@
 	if (res->flags & IORESOURCE_IRQ_LOWEDGE)
 		irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING);
 
-	ret = request_irq(ds1wm_data->irq, ds1wm_isr,
+	ret = devm_request_irq(&pdev->dev, ds1wm_data->irq, ds1wm_isr,
 			IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data);
 	if (ret)
-		goto err1;
+		return ret;
 
 	ds1wm_up(ds1wm_data);
 
@@ -516,17 +508,12 @@
 
 	ret = w1_add_master_device(&ds1wm_master);
 	if (ret)
-		goto err2;
+		goto err;
 
 	return 0;
 
-err2:
+err:
 	ds1wm_down(ds1wm_data);
-	free_irq(ds1wm_data->irq, ds1wm_data);
-err1:
-	iounmap(ds1wm_data->map);
-err0:
-	kfree(ds1wm_data);
 
 	return ret;
 }
@@ -560,9 +547,6 @@
 
 	w1_remove_master_device(&ds1wm_master);
 	ds1wm_down(ds1wm_data);
-	free_irq(ds1wm_data->irq, ds1wm_data);
-	iounmap(ds1wm_data->map);
-	kfree(ds1wm_data);
 
 	return 0;
 }
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 6429b9e..e033491 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -51,10 +51,10 @@
  * The top 4 bits always read 0.
  * To write, the top nibble must be the 1's compl. of the low nibble.
  */
-#define DS2482_REG_CFG_1WS		0x08
-#define DS2482_REG_CFG_SPU		0x04
-#define DS2482_REG_CFG_PPM		0x02
-#define DS2482_REG_CFG_APU		0x01
+#define DS2482_REG_CFG_1WS		0x08	/* 1-wire speed */
+#define DS2482_REG_CFG_SPU		0x04	/* strong pull-up */
+#define DS2482_REG_CFG_PPM		0x02	/* presence pulse masking */
+#define DS2482_REG_CFG_APU		0x01	/* active pull-up */
 
 
 /**
@@ -132,6 +132,17 @@
 
 
 /**
+ * Helper to calculate values for configuration register
+ * @param conf the raw config value
+ * @return the value, with complemented upper nibble, to write to the register
+ */
+static inline u8 ds2482_calculate_config(u8 conf)
+{
+	return conf | ((~conf & 0x0f) << 4);
+}
+
+
+/**
  * Sets the read pointer.
  * @param pdev		The ds2482 client pointer
  * @param read_ptr	see DS2482_PTR_CODE_xxx above
@@ -399,7 +410,7 @@
 		/* If the chip did reset since detect, re-config it */
 		if (err & DS2482_REG_STS_RST)
 			ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
-					     0xF0);
+					     ds2482_calculate_config(0x00));
 	}
 
 	mutex_unlock(&pdev->access_lock);
@@ -407,6 +418,32 @@
 	return retval;
 }
 
+static u8 ds2482_w1_set_pullup(void *data, int delay)
+{
+	struct ds2482_w1_chan *pchan = data;
+	struct ds2482_data    *pdev = pchan->pdev;
+	u8 retval = 1;
+
+	/* if delay is non-zero, activate the pullup;
+	 * the strong pullup will be automatically deactivated
+	 * by the master, so do not explicitly deactivate it
+	 */
+	if (delay) {
+		/* both waits are crucial, otherwise devices might not be
+		 * powered long enough, causing e.g. a w1_therm sensor to
+		 * provide wrong conversion results
+		 */
+		ds2482_wait_1wire_idle(pdev);
+		/* note: it seems like both SPU and APU have to be set! */
+		retval = ds2482_send_cmd_data(pdev, DS2482_CMD_WRITE_CONFIG,
+			ds2482_calculate_config(DS2482_REG_CFG_SPU |
+						DS2482_REG_CFG_APU));
+		ds2482_wait_1wire_idle(pdev);
+	}
+
+	return retval;
+}
+
 
 static int ds2482_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
@@ -452,7 +489,8 @@
 		data->w1_count = 8;
 
 	/* Set all config items to 0 (off) */
-	ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG, 0xF0);
+	ds2482_send_cmd_data(data, DS2482_CMD_WRITE_CONFIG,
+		ds2482_calculate_config(0x00));
 
 	mutex_init(&data->access_lock);
 
@@ -468,6 +506,7 @@
 		data->w1_ch[idx].w1_bm.touch_bit  = ds2482_w1_touch_bit;
 		data->w1_ch[idx].w1_bm.triplet    = ds2482_w1_triplet;
 		data->w1_ch[idx].w1_bm.reset_bus  = ds2482_w1_reset_bus;
+		data->w1_ch[idx].w1_bm.set_pullup = ds2482_w1_set_pullup;
 
 		err = w1_add_master_device(&data->w1_ch[idx].w1_bm);
 		if (err) {
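
For reference, ds2482_calculate_config() above produces the complemented upper nibble the
register write requires: an all-zero config gives 0x00 | ((~0x00 & 0x0f) << 4) = 0xF0,
the value the old code wrote literally, and enabling SPU|APU (0x05) gives
0x05 | (0x0a << 4) = 0xA5.  (Worked arithmetic only; not part of the patch.)
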
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 708a25f..372c8c0 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -109,34 +109,21 @@
 	struct resource *res;
 	int err = 0;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res)
-		return -ENODEV;
-
-	mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL);
+	mdev = devm_kzalloc(&pdev->dev, sizeof(struct mxc_w1_device),
+			    GFP_KERNEL);
 	if (!mdev)
 		return -ENOMEM;
 
-	mdev->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(mdev->clk)) {
-		err = PTR_ERR(mdev->clk);
-		goto failed_clk;
-	}
+	mdev->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(mdev->clk))
+		return PTR_ERR(mdev->clk);
 
 	mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1;
 
-	res = request_mem_region(res->start, resource_size(res),
-				"mxc_w1");
-	if (!res) {
-		err = -EBUSY;
-		goto failed_req;
-	}
-
-	mdev->regs = ioremap(res->start, resource_size(res));
-	if (!mdev->regs) {
-		dev_err(&pdev->dev, "Cannot map mxc_w1 registers\n");
-		goto failed_ioremap;
-	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	mdev->regs = devm_request_and_ioremap(&pdev->dev, res);
+	if (!mdev->regs)
+		return -EBUSY;
 
 	clk_prepare_enable(mdev->clk);
 	__raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
@@ -148,20 +135,10 @@
 	err = w1_add_master_device(&mdev->bus_master);
 
 	if (err)
-		goto failed_add;
+		return err;
 
 	platform_set_drvdata(pdev, mdev);
 	return 0;
-
-failed_add:
-	iounmap(mdev->regs);
-failed_ioremap:
-	release_mem_region(res->start, resource_size(res));
-failed_req:
-	clk_put(mdev->clk);
-failed_clk:
-	kfree(mdev);
-	return err;
 }
 
 /*
@@ -170,16 +147,10 @@
 static int mxc_w1_remove(struct platform_device *pdev)
 {
 	struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
-	struct resource *res;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
 	w1_remove_master_device(&mdev->bus_master);
 
-	iounmap(mdev->regs);
-	release_mem_region(res->start, resource_size(res));
 	clk_disable_unprepare(mdev->clk);
-	clk_put(mdev->clk);
 
 	platform_set_drvdata(pdev, NULL);
 
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 85b363a..d39dfa4 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -72,7 +72,7 @@
 	return 0;
 }
 
-static int __init w1_gpio_probe(struct platform_device *pdev)
+static int w1_gpio_probe(struct platform_device *pdev)
 {
 	struct w1_bus_master *master;
 	struct w1_gpio_platform_data *pdata;
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 92d08e7..c1a702f 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -41,14 +41,18 @@
 * If it was disabled, a parasite powered device might not get the required
  * current to do a temperature conversion.  If it is enabled parasite powered
  * devices have a better chance of getting the current required.
+ * In case the parasite power-detection is not working (seems to be the case
+ * for some DS18S20) the strong pullup can also be forced, regardless of the
+ * power state of the devices.
+ *
+ * Summary of options:
+ * - strong_pullup = 0	Disable strong pullup completely
+ * - strong_pullup = 1	Enable automatic strong pullup detection
+ * - strong_pullup = 2	Force strong pullup
  */
 static int w1_strong_pullup = 1;
 module_param_named(strong_pullup, w1_strong_pullup, int, 0);
 
-static u8 bad_roms[][9] = {
-				{0xaa, 0x00, 0x4b, 0x46, 0xff, 0xff, 0x0c, 0x10, 0x87},
-				{}
-			};
 
 static ssize_t w1_therm_read(struct device *device,
 	struct device_attribute *attr, char *buf);
@@ -168,16 +172,6 @@
 	return 0;
 }
 
-static int w1_therm_check_rom(u8 rom[9])
-{
-	int i;
-
-	for (i=0; i<sizeof(bad_roms)/9; ++i)
-		if (!memcmp(bad_roms[i], rom, 9))
-			return 1;
-
-	return 0;
-}
 
 static ssize_t w1_therm_read(struct device *device,
 	struct device_attribute *attr, char *buf)
@@ -194,10 +188,11 @@
 
 	memset(rom, 0, sizeof(rom));
 
-	verdict = 0;
-	crc = 0;
-
 	while (max_trying--) {
+
+		verdict = 0;
+		crc = 0;
+
 		if (!w1_reset_select_slave(sl)) {
 			int count = 0;
 			unsigned int tm = 750;
@@ -210,7 +205,8 @@
 				continue;
 
 			/* 750ms strong pullup (or delay) after the convert */
-			if (!external_power && w1_strong_pullup)
+			if (w1_strong_pullup == 2 ||
+					(!external_power && w1_strong_pullup))
 				w1_next_pullup(dev, tm);
 
 			w1_write_8(dev, W1_CONVERT_TEMP);
@@ -249,7 +245,7 @@
 			}
 		}
 
-		if (!w1_therm_check_rom(rom))
+		if (verdict)
 			break;
 	}
 
@@ -260,7 +256,7 @@
 	if (verdict)
 		memcpy(sl->rom, rom, sizeof(sl->rom));
 	else
-		dev_warn(device, "18S20 doesn't respond to CONVERT_TEMP.\n");
+		dev_warn(device, "Read failed CRC check\n");
 
 	for (i = 0; i < 9; ++i)
 		c -= snprintf(buf + PAGE_SIZE - c, c, "%02x ", sl->rom[i]);
diff --git a/include/linux/extcon/extcon_gpio.h b/include/linux/extcon/extcon-gpio.h
similarity index 100%
rename from include/linux/extcon/extcon_gpio.h
rename to include/linux/extcon/extcon-gpio.h
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index e73b852..df77ba9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -325,14 +325,28 @@
 
 	u32 interrupt_mask;
 
-	/* Pad it to PAGE_SIZE so that data starts on page boundary */
-	u8	reserved[4084];
-
-	/* NOTE:
-	 * The interrupt_mask field is used only for channels but since our
-	 * vmbus connection also uses this data structure and its data starts
-	 * here, we commented out this field.
+	/*
+	 * Win8 uses some of the reserved bits to implement
+	 * interrupt driven flow management. On the send side
+	 * we can request that the receiver interrupt the sender
+	 * when the ring transitions from being full to being able
+	 * to handle a message of size "pending_send_sz".
+	 *
+	 * Add necessary state for this enhancement.
 	 */
+	u32 pending_send_sz;
+
+	u32 reserved1[12];
+
+	union {
+		struct {
+			u32 feat_pending_send_sz:1;
+		};
+		u32 value;
+	} feature_bits;
+
+	/* Pad it to PAGE_SIZE so that data starts on page boundary */
+	u8	reserved2[4028];
 
 	/*
 	 * Ring data starts here + RingDataStartOffset
@@ -405,12 +419,22 @@
  */
 #define HV_DRV_VERSION           "3.1"
 
-
 /*
- * A revision number of vmbus that is used for ensuring both ends on a
- * partition are using compatible versions.
+ * VMBUS version is a 32 bit entity broken up into
+ * two 16 bit quantities: major_number.minor_number.
+ *
+ * 0.13 (Windows Server 2008)
+ * 1.1  (Windows 7)
+ * 2.4  (Windows 8)
  */
-#define VMBUS_REVISION_NUMBER		13
+
+#define VERSION_WS2008  ((0 << 16) | (13))
+#define VERSION_WIN7    ((1 << 16) | (1))
+#define VERSION_WIN8    ((2 << 16) | (4))
+
+#define VERSION_INVAL -1
+
+#define VERSION_CURRENT VERSION_WIN8
 
 /* Make maximum size of pipe payload of 16K */
 #define MAX_PIPE_DATA_PAYLOAD		(sizeof(u8) * 16384)
@@ -432,9 +456,13 @@
 struct vmbus_channel_offer {
 	uuid_le if_type;
 	uuid_le if_instance;
-	u64 int_latency; /* in 100ns units */
-	u32 if_revision;
-	u32 server_ctx_size;	/* in bytes */
+
+	/*
+	 * These two fields are not currently used.
+	 */
+	u64 reserved1;
+	u64 reserved2;
+
 	u16 chn_flags;
 	u16 mmio_megabytes;		/* in bytes * 1024 * 1024 */
 
@@ -456,7 +484,11 @@
 			unsigned char user_def[MAX_PIPE_USER_DEFINED_BYTES];
 		} pipe;
 	} u;
-	u32 padding;
+	/*
+	 * The sub_channel_index is defined in win8.
+	 */
+	u16 sub_channel_index;
+	u16 reserved3;
 } __packed;
 
 /* Server Flags */
@@ -652,7 +684,25 @@
 	struct vmbus_channel_offer offer;
 	u32 child_relid;
 	u8 monitorid;
-	u8 monitor_allocated;
+	/*
+	 * win7 and beyond splits this field into a bit field.
+	 */
+	u8 monitor_allocated:1;
+	u8 reserved:7;
+	/*
+	 * These are new fields added in win7 and later.
+	 * Do not access these fields without checking the
+	 * negotiated protocol.
+	 *
+	 * If "is_dedicated_interrupt" is set, we must not set the
+	 * associated bit in the channel bitmap while sending the
+	 * interrupt to the host.
+	 *
+	 * connection_id is to be used in signaling the host.
+	 */
+	u16 is_dedicated_interrupt:1;
+	u16 reserved1:15;
+	u32 connection_id;
 } __packed;
 
 /* Rescind Offer parameters */
@@ -683,8 +733,15 @@
 	/* GPADL for the channel's ring buffer. */
 	u32 ringbuffer_gpadlhandle;
 
-	/* GPADL for the channel's server context save area. */
-	u32 server_contextarea_gpadlhandle;
+	/*
+	 * Starting with win8, this field will be used to specify
+	 * the target virtual processor on which to deliver the interrupt for
+	 * the host to guest communication.
+	 * Prior to win8, incoming channel interrupts would only
+	 * be delivered on cpu 0. Setting this value to 0 would
+	 * preserve the earlier behavior.
+	 */
+	u32 target_vp;
 
 	/*
 	* The upstream ring buffer begins at offset zero in the memory
@@ -848,6 +905,27 @@
 	struct vmbus_channel_close_channel msg;
 };
 
+/* Define connection identifier type. */
+union hv_connection_id {
+	u32 asu32;
+	struct {
+		u32 id:24;
+		u32 reserved:8;
+	} u;
+};
+
+/* Definition of the hv_signal_event hypercall input structure. */
+struct hv_input_signal_event {
+	union hv_connection_id connectionid;
+	u16 flag_number;
+	u16 rsvdz;
+};
+
+struct hv_input_signal_event_buffer {
+	u64 align8;
+	struct hv_input_signal_event event;
+};
+
 struct vmbus_channel {
 	struct list_head listentry;
 
@@ -882,8 +960,42 @@
 
 	void (*onchannel_callback)(void *context);
 	void *channel_callback_context;
+
+	/*
+	 * A channel can be marked for efficient (batched)
+	 * reading:
+	 * If batched_reading is set to "true", we read until the
+	 * channel is empty and hold off interrupts from the host
+	 * during the entire read process.
+	 * If batched_reading is set to "false", the client is not
+	 * going to perform batched reading.
+	 *
+	 * By default we will enable batched reading; specific
+	 * drivers that don't want this behavior can turn it off.
+	 */
+
+	bool batched_reading;
+
+	bool is_dedicated_interrupt;
+	struct hv_input_signal_event_buffer sig_buf;
+	struct hv_input_signal_event *sig_event;
+
+	/*
+	 * Starting with win8, this field will be used to specify
+	 * the target virtual processor on which to deliver the interrupt for
+	 * the host to guest communication.
+	 * Prior to win8, incoming channel interrupts would only
+	 * be delivered on cpu 0. Setting this value to 0 would
+	 * preserve the earlier behavior.
+	 */
+	u32 target_vp;
 };
 
+static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
+{
+	c->batched_reading = state;
+}
+
 void vmbus_onmessage(void *context);
 
 int vmbus_request_offers(void);
@@ -1047,6 +1159,100 @@
 		  g8, g9, ga, gb, gc, gd, ge, gf },
 
 /*
+ * GUID definitions of various offer types - services offered to the guest.
+ */
+
+/*
+ * Network GUID
+ * {f8615163-df3e-46c5-913f-f2d2f965ed0e}
+ */
+#define HV_NIC_GUID \
+	.guid = { \
+			0x63, 0x51, 0x61, 0xf8, 0x3e, 0xdf, 0xc5, 0x46, \
+			0x91, 0x3f, 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e \
+		}
+
+/*
+ * IDE GUID
+ * {32412632-86cb-44a2-9b5c-50d1417354f5}
+ */
+#define HV_IDE_GUID \
+	.guid = { \
+			0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44, \
+			0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5 \
+		}
+
+/*
+ * SCSI GUID
+ * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f}
+ */
+#define HV_SCSI_GUID \
+	.guid = { \
+			0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d, \
+			0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f \
+		}
+
+/*
+ * Shutdown GUID
+ * {0e0b6031-5213-4934-818b-38d90ced39db}
+ */
+#define HV_SHUTDOWN_GUID \
+	.guid = { \
+			0x31, 0x60, 0x0b, 0x0e, 0x13, 0x52, 0x34, 0x49, \
+			0x81, 0x8b, 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb \
+		}
+
+/*
+ * Time Synch GUID
+ * {9527E630-D0AE-497b-ADCE-E80AB0175CAF}
+ */
+#define HV_TS_GUID \
+	.guid = { \
+			0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49, \
+			0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf \
+		}
+
+/*
+ * Heartbeat GUID
+ * {57164f39-9115-4e78-ab55-382f3bd5422d}
+ */
+#define HV_HEART_BEAT_GUID \
+	.guid = { \
+			0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e, \
+			0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d \
+		}
+
+/*
+ * KVP GUID
+ * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6}
+ */
+#define HV_KVP_GUID \
+	.guid = { \
+			0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d, \
+			0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3,  0xe6 \
+		}
+
+/*
+ * Dynamic memory GUID
+ * {525074dc-8985-46e2-8057-a307dc18a502}
+ */
+#define HV_DM_GUID \
+	.guid = { \
+			0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46, \
+			0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02 \
+		}
+
+/*
+ * Mouse GUID
+ * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a}
+ */
+#define HV_MOUSE_GUID \
+	.guid = { \
+			0x9e, 0xb6, 0xa8, 0xcf, 0x4a, 0x5b, 0xc0, 0x4c, \
+			0xb9, 0x8b, 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a \
+		}
+
+/*
  * Common header for Hyper-V ICs
  */
 
@@ -1150,5 +1356,11 @@
 void hv_kvp_deinit(void);
 void hv_kvp_onchannelcallback(void *);
 
+/*
+ * Negotiated version with the Host.
+ */
+
+extern __u32 vmbus_proto_version;
+
 #endif /* __KERNEL__ */
 #endif /* _HYPERV_H */
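
Illustrative helpers (not part of this patch) showing how the 16.16 encoding used by
VERSION_WS2008/VERSION_WIN7/VERSION_WIN8 and vmbus_proto_version splits back into its
major and minor parts:

	static inline __u16 vmbus_version_major(__u32 ver)
	{
		return ver >> 16;
	}

	static inline __u16 vmbus_version_minor(__u32 ver)
	{
		return ver & 0xffff;
	}

	/* e.g. vmbus_version_major(VERSION_WIN8) == 2,
	 *      vmbus_version_minor(VERSION_WIN8) == 4 */
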
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index a580363..a710255 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -75,8 +75,10 @@
 #define ARIZONA_IRQ_DCS_HP_DONE           47
 #define ARIZONA_IRQ_FLL2_CLOCK_OK         48
 #define ARIZONA_IRQ_FLL1_CLOCK_OK         49
+#define ARIZONA_IRQ_MICD_CLAMP_RISE	  50
+#define ARIZONA_IRQ_MICD_CLAMP_FALL	  51
 
-#define ARIZONA_NUM_IRQ                   50
+#define ARIZONA_NUM_IRQ                   52
 
 struct snd_soc_dapm_context;
 
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index ec3e2a2..96d64f2 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -105,9 +105,30 @@
 	 */
 	int max_channels_clocked[ARIZONA_MAX_AIF];
 
+	/** GPIO5 is used for jack detection */
+	bool jd_gpio5;
+
+	/** Use the headphone detect circuit to identify the accessory */
+	bool hpdet_acc_id;
+
+	/** GPIO used for mic isolation with HPDET */
+	int hpdet_id_gpio;
+
 	/** GPIO for mic detection polarity */
 	int micd_pol_gpio;
 
+	/** Mic detect ramp rate */
+	int micd_bias_start_time;
+
+	/** Mic detect sample rate */
+	int micd_rate;
+
+	/** Mic detect debounce level */
+	int micd_dbtime;
+
+	/** Force MICBIAS on for mic detect */
+	bool micd_force_micbias;
+
 	/** Headset polarity configurations */
 	struct arizona_micd_config *micd_configs;
 	int num_micd_configs;
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index 1f6fe31..188d89a 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -119,6 +119,8 @@
 #define ARIZONA_ACCESSORY_DETECT_MODE_1          0x293
 #define ARIZONA_HEADPHONE_DETECT_1               0x29B
 #define ARIZONA_HEADPHONE_DETECT_2               0x29C
+#define ARIZONA_HP_DACVAL			 0x29F
+#define ARIZONA_MICD_CLAMP_CONTROL               0x2A2
 #define ARIZONA_MIC_DETECT_1                     0x2A3
 #define ARIZONA_MIC_DETECT_2                     0x2A4
 #define ARIZONA_MIC_DETECT_3                     0x2A5
@@ -1194,6 +1196,14 @@
 /*
  * R64 (0x40) - Wake control
  */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL             0x0080  /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL_MASK        0x0080  /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL_SHIFT            7  /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_FALL_WIDTH            1  /* WKUP_MICD_CLAMP_FALL */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE             0x0040  /* WKUP_MICD_CLAMP_RISE */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE_MASK        0x0040  /* WKUP_MICD_CLAMP_RISE */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE_SHIFT            6  /* WKUP_MICD_CLAMP_RISE */
+#define ARIZONA_WKUP_MICD_CLAMP_RISE_WIDTH            1  /* WKUP_MICD_CLAMP_RISE */
 #define ARIZONA_WKUP_GP5_FALL                    0x0020  /* WKUP_GP5_FALL */
 #define ARIZONA_WKUP_GP5_FALL_MASK               0x0020  /* WKUP_GP5_FALL */
 #define ARIZONA_WKUP_GP5_FALL_SHIFT                   5  /* WKUP_GP5_FALL */
@@ -2035,6 +2045,9 @@
 /*
  * R667 (0x29B) - Headphone Detect 1
  */
+#define ARIZONA_HP_IMPEDANCE_RANGE_MASK          0x0600  /* HP_IMPEDANCE_RANGE - [10:9] */
+#define ARIZONA_HP_IMPEDANCE_RANGE_SHIFT              9  /* HP_IMPEDANCE_RANGE - [10:9] */
+#define ARIZONA_HP_IMPEDANCE_RANGE_WIDTH              2  /* HP_IMPEDANCE_RANGE - [10:9] */
 #define ARIZONA_HP_STEP_SIZE                     0x0100  /* HP_STEP_SIZE */
 #define ARIZONA_HP_STEP_SIZE_MASK                0x0100  /* HP_STEP_SIZE */
 #define ARIZONA_HP_STEP_SIZE_SHIFT                    8  /* HP_STEP_SIZE */
@@ -2069,6 +2082,21 @@
 #define ARIZONA_HP_LVL_SHIFT                          0  /* HP_LVL - [6:0] */
 #define ARIZONA_HP_LVL_WIDTH                          7  /* HP_LVL - [6:0] */
 
+#define ARIZONA_HP_DONE_B                        0x8000  /* HP_DONE */
+#define ARIZONA_HP_DONE_B_MASK                   0x8000  /* HP_DONE */
+#define ARIZONA_HP_DONE_B_SHIFT                      15  /* HP_DONE */
+#define ARIZONA_HP_DONE_B_WIDTH                       1  /* HP_DONE */
+#define ARIZONA_HP_LVL_B_MASK                    0x7FFF  /* HP_LVL - [14:0] */
+#define ARIZONA_HP_LVL_B_SHIFT                        0  /* HP_LVL - [14:0] */
+#define ARIZONA_HP_LVL_B_WIDTH                       15  /* HP_LVL - [14:0] */
+
+/*
+ * R674 (0x2A2) - MICD clamp control
+ */
+#define ARIZONA_MICD_CLAMP_MODE_MASK             0x000F  /* MICD_CLAMP_MODE - [3:0] */
+#define ARIZONA_MICD_CLAMP_MODE_SHIFT                 0  /* MICD_CLAMP_MODE - [3:0] */
+#define ARIZONA_MICD_CLAMP_MODE_WIDTH                 4  /* MICD_CLAMP_MODE - [3:0] */
+
 /*
  * R675 (0x2A3) - Mic Detect 1
  */
@@ -5239,6 +5267,14 @@
 /*
  * R3408 (0xD50) - AOD wkup and trig
  */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS         0x0080  /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_MASK    0x0080  /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_SHIFT        7  /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_FALL_TRIG_STS_WIDTH        1  /* MICD_CLAMP_FALL_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS         0x0040  /* MICD_CLAMP_RISE_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_MASK    0x0040  /* MICD_CLAMP_RISE_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_SHIFT        6  /* MICD_CLAMP_RISE_TRIG_STS */
+#define ARIZONA_MICD_CLAMP_RISE_TRIG_STS_WIDTH        1  /* MICD_CLAMP_RISE_TRIG_STS */
 #define ARIZONA_GP5_FALL_TRIG_STS                0x0020  /* GP5_FALL_TRIG_STS */
 #define ARIZONA_GP5_FALL_TRIG_STS_MASK           0x0020  /* GP5_FALL_TRIG_STS */
 #define ARIZONA_GP5_FALL_TRIG_STS_SHIFT               5  /* GP5_FALL_TRIG_STS */
@@ -5267,6 +5303,12 @@
 /*
  * R3409 (0xD51) - AOD IRQ1
  */
+#define ARIZONA_MICD_CLAMP_FALL_EINT1            0x0080  /* MICD_CLAMP_FALL_EINT1 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT1_MASK       0x0080  /* MICD_CLAMP_FALL_EINT1 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT1_SHIFT           7  /* MICD_CLAMP_FALL_EINT1 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT1            0x0040  /* MICD_CLAMP_RISE_EINT1 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT1_MASK       0x0040  /* MICD_CLAMP_RISE_EINT1 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT1_SHIFT           6  /* MICD_CLAMP_RISE_EINT1 */
 #define ARIZONA_GP5_FALL_EINT1                   0x0020  /* GP5_FALL_EINT1 */
 #define ARIZONA_GP5_FALL_EINT1_MASK              0x0020  /* GP5_FALL_EINT1 */
 #define ARIZONA_GP5_FALL_EINT1_SHIFT                  5  /* GP5_FALL_EINT1 */
@@ -5295,6 +5337,12 @@
 /*
  * R3410 (0xD52) - AOD IRQ2
  */
+#define ARIZONA_MICD_CLAMP_FALL_EINT2            0x0080  /* MICD_CLAMP_FALL_EINT2 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT2_MASK       0x0080  /* MICD_CLAMP_FALL_EINT2 */
+#define ARIZONA_MICD_CLAMP_FALL_EINT2_SHIFT           7  /* MICD_CLAMP_FALL_EINT2 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT2            0x0040  /* MICD_CLAMP_RISE_EINT2 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT2_MASK       0x0040  /* MICD_CLAMP_RISE_EINT2 */
+#define ARIZONA_MICD_CLAMP_RISE_EINT2_SHIFT           6  /* MICD_CLAMP_RISE_EINT2 */
 #define ARIZONA_GP5_FALL_EINT2                   0x0020  /* GP5_FALL_EINT2 */
 #define ARIZONA_GP5_FALL_EINT2_MASK              0x0020  /* GP5_FALL_EINT2 */
 #define ARIZONA_GP5_FALL_EINT2_SHIFT                  5  /* GP5_FALL_EINT2 */
@@ -5379,6 +5427,10 @@
 /*
  * R3413 (0xD55) - AOD IRQ Raw Status
  */
+#define ARIZONA_MICD_CLAMP_STS                   0x0008  /* MICD_CLAMP_STS */
+#define ARIZONA_MICD_CLAMP_STS_MASK              0x0008  /* MICD_CLAMP_STS */
+#define ARIZONA_MICD_CLAMP_STS_SHIFT                  3  /* MICD_CLAMP_STS */
+#define ARIZONA_MICD_CLAMP_STS_WIDTH                  1  /* MICD_CLAMP_STS */
 #define ARIZONA_GP5_STS                          0x0004  /* GP5_STS */
 #define ARIZONA_GP5_STS_MASK                     0x0004  /* GP5_STS */
 #define ARIZONA_GP5_STS_SHIFT                         2  /* GP5_STS */
@@ -5395,6 +5447,10 @@
 /*
  * R3414 (0xD56) - Jack detect debounce
  */
+#define ARIZONA_MICD_CLAMP_DB                    0x0008  /* MICD_CLAMP_DB */
+#define ARIZONA_MICD_CLAMP_DB_MASK               0x0008  /* MICD_CLAMP_DB */
+#define ARIZONA_MICD_CLAMP_DB_SHIFT                   3  /* MICD_CLAMP_DB */
+#define ARIZONA_MICD_CLAMP_DB_WIDTH                   1  /* MICD_CLAMP_DB */
 #define ARIZONA_JD2_DB                           0x0002  /* JD2_DB */
 #define ARIZONA_JD2_DB_MASK                      0x0002  /* JD2_DB */
 #define ARIZONA_JD2_DB_SHIFT                          1  /* JD2_DB */
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 1eeae5c..5b18ecd 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -106,6 +106,92 @@
 	MAX77693_MUIC_REG_END,
 };
 
+/* MAX77693 MUIC - STATUS1~3 Register */
+#define STATUS1_ADC_SHIFT		(0)
+#define STATUS1_ADCLOW_SHIFT		(5)
+#define STATUS1_ADCERR_SHIFT		(6)
+#define STATUS1_ADC1K_SHIFT		(7)
+#define STATUS1_ADC_MASK		(0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK		(0x1 << STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK		(0x1 << STATUS1_ADCERR_SHIFT)
+#define STATUS1_ADC1K_MASK		(0x1 << STATUS1_ADC1K_SHIFT)
+
+#define STATUS2_CHGTYP_SHIFT		(0)
+#define STATUS2_CHGDETRUN_SHIFT		(3)
+#define STATUS2_DCDTMR_SHIFT		(4)
+#define STATUS2_DXOVP_SHIFT		(5)
+#define STATUS2_VBVOLT_SHIFT		(6)
+#define STATUS2_VIDRM_SHIFT		(7)
+#define STATUS2_CHGTYP_MASK		(0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK		(0x1 << STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK		(0x1 << STATUS2_DCDTMR_SHIFT)
+#define STATUS2_DXOVP_MASK		(0x1 << STATUS2_DXOVP_SHIFT)
+#define STATUS2_VBVOLT_MASK		(0x1 << STATUS2_VBVOLT_SHIFT)
+#define STATUS2_VIDRM_MASK		(0x1 << STATUS2_VIDRM_SHIFT)
+
+#define STATUS3_OVP_SHIFT		(2)
+#define STATUS3_OVP_MASK		(0x1 << STATUS3_OVP_SHIFT)
+
+/* MAX77693 CDETCTRL1~2 register */
+#define CDETCTRL1_CHGDETEN_SHIFT	(0)
+#define CDETCTRL1_CHGTYPMAN_SHIFT	(1)
+#define CDETCTRL1_DCDEN_SHIFT		(2)
+#define CDETCTRL1_DCD2SCT_SHIFT		(3)
+#define CDETCTRL1_CDDELAY_SHIFT		(4)
+#define CDETCTRL1_DCDCPL_SHIFT		(5)
+#define CDETCTRL1_CDPDET_SHIFT		(7)
+#define CDETCTRL1_CHGDETEN_MASK		(0x1 << CDETCTRL1_CHGDETEN_SHIFT)
+#define CDETCTRL1_CHGTYPMAN_MASK	(0x1 << CDETCTRL1_CHGTYPMAN_SHIFT)
+#define CDETCTRL1_DCDEN_MASK		(0x1 << CDETCTRL1_DCDEN_SHIFT)
+#define CDETCTRL1_DCD2SCT_MASK		(0x1 << CDETCTRL1_DCD2SCT_SHIFT)
+#define CDETCTRL1_CDDELAY_MASK		(0x1 << CDETCTRL1_CDDELAY_SHIFT)
+#define CDETCTRL1_DCDCPL_MASK		(0x1 << CDETCTRL1_DCDCPL_SHIFT)
+#define CDETCTRL1_CDPDET_MASK		(0x1 << CDETCTRL1_CDPDET_SHIFT)
+
+#define CDETCTRL2_VIDRMEN_SHIFT		(1)
+#define CDETCTRL2_DXOVPEN_SHIFT		(3)
+#define CDETCTRL2_VIDRMEN_MASK		(0x1 << CDETCTRL2_VIDRMEN_SHIFT)
+#define CDETCTRL2_DXOVPEN_MASK		(0x1 << CDETCTRL2_DXOVPEN_SHIFT)
+
+/* MAX77693 MUIC - CONTROL1~3 register */
+#define COMN1SW_SHIFT			(0)
+#define COMP2SW_SHIFT			(3)
+#define COMN1SW_MASK			(0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK			(0x7 << COMP2SW_SHIFT)
+#define COMP_SW_MASK			(COMP2SW_MASK | COMN1SW_MASK)
+#define CONTROL1_SW_USB			((1 << COMP2SW_SHIFT) \
+						| (1 << COMN1SW_SHIFT))
+#define CONTROL1_SW_AUDIO		((2 << COMP2SW_SHIFT) \
+						| (2 << COMN1SW_SHIFT))
+#define CONTROL1_SW_UART		((3 << COMP2SW_SHIFT) \
+						| (3 << COMN1SW_SHIFT))
+#define CONTROL1_SW_OPEN		((0 << COMP2SW_SHIFT) \
+						| (0 << COMN1SW_SHIFT))
+
+#define CONTROL2_LOWPWR_SHIFT		(0)
+#define CONTROL2_ADCEN_SHIFT		(1)
+#define CONTROL2_CPEN_SHIFT		(2)
+#define CONTROL2_SFOUTASRT_SHIFT	(3)
+#define CONTROL2_SFOUTORD_SHIFT		(4)
+#define CONTROL2_ACCDET_SHIFT		(5)
+#define CONTROL2_USBCPINT_SHIFT		(6)
+#define CONTROL2_RCPS_SHIFT		(7)
+#define CONTROL2_LOWPWR_MASK		(0x1 << CONTROL2_LOWPWR_SHIFT)
+#define CONTROL2_ADCEN_MASK		(0x1 << CONTROL2_ADCEN_SHIFT)
+#define CONTROL2_CPEN_MASK		(0x1 << CONTROL2_CPEN_SHIFT)
+#define CONTROL2_SFOUTASRT_MASK		(0x1 << CONTROL2_SFOUTASRT_SHIFT)
+#define CONTROL2_SFOUTORD_MASK		(0x1 << CONTROL2_SFOUTORD_SHIFT)
+#define CONTROL2_ACCDET_MASK		(0x1 << CONTROL2_ACCDET_SHIFT)
+#define CONTROL2_USBCPINT_MASK		(0x1 << CONTROL2_USBCPINT_SHIFT)
+#define CONTROL2_RCPS_MASK		(0x1 << CONTROL2_RCPS_SHIFT)
+
+#define CONTROL3_JIGSET_SHIFT		(0)
+#define CONTROL3_BTLDSET_SHIFT		(2)
+#define CONTROL3_ADCDBSET_SHIFT		(4)
+#define CONTROL3_JIGSET_MASK		(0x3 << CONTROL3_JIGSET_SHIFT)
+#define CONTROL3_BTLDSET_MASK		(0x3 << CONTROL3_BTLDSET_SHIFT)
+#define CONTROL3_ADCDBSET_MASK		(0x3 << CONTROL3_ADCDBSET_SHIFT)
+
 /* Slave addr = 0x90: Haptic */
 enum max77693_haptic_reg {
 	MAX77693_HAPTIC_REG_STATUS		= 0x00,
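
The CONTROL1_SW_* values added above write the same 3-bit code into both switch fields (COMN1SW and COMP2SW). A minimal sketch of how a MUIC driver might apply one of them, assuming regmap access and that MAX77693_MUIC_REG_CTRL1 from the register enum names the CONTROL1 register:

#include <linux/regmap.h>
#include <linux/mfd/max77693-private.h>

/* Sketch: route D+/D- through the USB switch path via CONTROL1.
 * MAX77693_MUIC_REG_CTRL1 is assumed to name the CONTROL1 register.
 */
static int example_set_usb_path(struct regmap *regmap)
{
	return regmap_update_bits(regmap, MAX77693_MUIC_REG_CTRL1,
				  COMP_SW_MASK, CONTROL1_SW_USB);
}
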
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index fe03b2d..3109a6c 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -38,6 +38,15 @@
 struct max77693_muic_platform_data {
 	struct max77693_reg_data *init_data;
 	int num_init_data;
+
+	int detcable_delay_ms;
+
+	/*
+	 * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB,
+	 * selecting the h/w path of COMP2/COMN1 on the CONTROL1 register.
+	 */
+	int path_usb;
+	int path_uart;
 };
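
A board file could populate the new fields roughly as below; the 500 ms delay and the path values are illustrative only (CONTROL1_SW_USB/CONTROL1_SW_UART come from max77693-private.h above), not defaults taken from any existing platform:

/* Sketch of MUIC platform data using the new members. */
static struct max77693_muic_platform_data example_muic_pdata = {
	.detcable_delay_ms	= 500,
	.path_usb		= CONTROL1_SW_USB,
	.path_uart		= CONTROL1_SW_UART,
};
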
 
 struct max77693_platform_data {
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 6ae21bf..fb465df 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -194,6 +194,70 @@
 	MAX8997_MUIC_REG_END		= 0xf,
 };
 
+/* MAX8997-MUIC STATUS1 register */
+#define STATUS1_ADC_SHIFT		0
+#define STATUS1_ADCLOW_SHIFT		5
+#define STATUS1_ADCERR_SHIFT		6
+#define STATUS1_ADC_MASK		(0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK		(0x1 << STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK		(0x1 << STATUS1_ADCERR_SHIFT)
+
+/* MAX8997-MUIC STATUS2 register */
+#define STATUS2_CHGTYP_SHIFT		0
+#define STATUS2_CHGDETRUN_SHIFT		3
+#define STATUS2_DCDTMR_SHIFT		4
+#define STATUS2_DBCHG_SHIFT		5
+#define STATUS2_VBVOLT_SHIFT		6
+#define STATUS2_CHGTYP_MASK		(0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK		(0x1 << STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK		(0x1 << STATUS2_DCDTMR_SHIFT)
+#define STATUS2_DBCHG_MASK		(0x1 << STATUS2_DBCHG_SHIFT)
+#define STATUS2_VBVOLT_MASK		(0x1 << STATUS2_VBVOLT_SHIFT)
+
+/* MAX8997-MUIC STATUS3 register */
+#define STATUS3_OVP_SHIFT		2
+#define STATUS3_OVP_MASK		(0x1 << STATUS3_OVP_SHIFT)
+
+/* MAX8997-MUIC CONTROL1 register */
+#define COMN1SW_SHIFT			0
+#define COMP2SW_SHIFT			3
+#define COMN1SW_MASK			(0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK			(0x7 << COMP2SW_SHIFT)
+#define COMP_SW_MASK		(COMP2SW_MASK | COMN1SW_MASK)
+
+#define CONTROL1_SW_USB			((1 << COMP2SW_SHIFT) \
+						| (1 << COMN1SW_SHIFT))
+#define CONTROL1_SW_AUDIO		((2 << COMP2SW_SHIFT) \
+						| (2 << COMN1SW_SHIFT))
+#define CONTROL1_SW_UART		((3 << COMP2SW_SHIFT) \
+						| (3 << COMN1SW_SHIFT))
+#define CONTROL1_SW_OPEN		((0 << COMP2SW_SHIFT) \
+						| (0 << COMN1SW_SHIFT))
+
+#define CONTROL2_LOWPWR_SHIFT		(0)
+#define CONTROL2_ADCEN_SHIFT		(1)
+#define CONTROL2_CPEN_SHIFT		(2)
+#define CONTROL2_SFOUTASRT_SHIFT	(3)
+#define CONTROL2_SFOUTORD_SHIFT		(4)
+#define CONTROL2_ACCDET_SHIFT		(5)
+#define CONTROL2_USBCPINT_SHIFT		(6)
+#define CONTROL2_RCPS_SHIFT		(7)
+#define CONTROL2_LOWPWR_MASK		(0x1 << CONTROL2_LOWPWR_SHIFT)
+#define CONTROL2_ADCEN_MASK		(0x1 << CONTROL2_ADCEN_SHIFT)
+#define CONTROL2_CPEN_MASK		(0x1 << CONTROL2_CPEN_SHIFT)
+#define CONTROL2_SFOUTASRT_MASK		(0x1 << CONTROL2_SFOUTASRT_SHIFT)
+#define CONTROL2_SFOUTORD_MASK		(0x1 << CONTROL2_SFOUTORD_SHIFT)
+#define CONTROL2_ACCDET_MASK		(0x1 << CONTROL2_ACCDET_SHIFT)
+#define CONTROL2_USBCPINT_MASK		(0x1 << CONTROL2_USBCPINT_SHIFT)
+#define CONTROL2_RCPS_MASK		(0x1 << CONTROL2_RCPS_SHIFT)
+
+#define CONTROL3_JIGSET_SHIFT		(0)
+#define CONTROL3_BTLDSET_SHIFT		(2)
+#define CONTROL3_ADCDBSET_SHIFT		(4)
+#define CONTROL3_JIGSET_MASK		(0x3 << CONTROL3_JIGSET_SHIFT)
+#define CONTROL3_BTLDSET_MASK		(0x3 << CONTROL3_BTLDSET_SHIFT)
+#define CONTROL3_ADCDBSET_MASK		(0x3 << CONTROL3_ADCDBSET_SHIFT)
+
 enum max8997_haptic_reg {
 	MAX8997_HAPTIC_REG_GENERAL	= 0x00,
 	MAX8997_HAPTIC_REG_CONF1	= 0x01,
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index 1d4a4fe..cf81557 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -78,21 +78,6 @@
 	struct device_node *reg_node;
 };
 
-enum max8997_muic_usb_type {
-	MAX8997_USB_HOST,
-	MAX8997_USB_DEVICE,
-};
-
-enum max8997_muic_charger_type {
-	MAX8997_CHARGER_TYPE_NONE = 0,
-	MAX8997_CHARGER_TYPE_USB,
-	MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT,
-	MAX8997_CHARGER_TYPE_DEDICATED_CHG,
-	MAX8997_CHARGER_TYPE_500MA,
-	MAX8997_CHARGER_TYPE_1A,
-	MAX8997_CHARGER_TYPE_DEAD_BATTERY = 7,
-};
-
 struct max8997_muic_reg_data {
 	u8 addr;
 	u8 data;
@@ -107,6 +92,16 @@
 struct max8997_muic_platform_data {
 	struct max8997_muic_reg_data *init_data;
 	int num_init_data;
+
+	/* Check cable state after certain delay */
+	int detcable_delay_ms;
+
+	/*
+	 * Default USB/UART path, either UART/USB or AUX_UART/AUX_USB,
+	 * selecting the h/w path of COMP2/COMN1 on the CONTROL1 register.
+	 */
+	int path_usb;
+	int path_uart;
 };
 
 enum max8997_haptic_motor_type {
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
new file mode 100644
index 0000000..f6a1520
--- /dev/null
+++ b/include/linux/ntb.h
@@ -0,0 +1,83 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ *   redistributing this file, you may do so under either license.
+ *
+ *   GPL LICENSE SUMMARY
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of version 2 of the GNU General Public License as
+ *   published by the Free Software Foundation.
+ *
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2012 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Intel PCIe NTB Linux driver
+ *
+ * Contact Information:
+ * Jon Mason <jon.mason@intel.com>
+ */
+
+struct ntb_transport_qp;
+
+struct ntb_client {
+	struct device_driver driver;
+	int (*probe) (struct pci_dev *pdev);
+	void (*remove) (struct pci_dev *pdev);
+};
+
+int ntb_register_client(struct ntb_client *drvr);
+void ntb_unregister_client(struct ntb_client *drvr);
+int ntb_register_client_dev(char *device_name);
+void ntb_unregister_client_dev(char *device_name);
+
+struct ntb_queue_handlers {
+	void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+			    void *data, int len);
+	void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
+			    void *data, int len);
+	void (*event_handler) (void *data, int status);
+};
+
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
+struct ntb_transport_qp *
+ntb_transport_create_queue(void *data, struct pci_dev *pdev,
+			   const struct ntb_queue_handlers *handlers);
+void ntb_transport_free_queue(struct ntb_transport_qp *qp);
+int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+			     unsigned int len);
+int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
+			     unsigned int len);
+void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len);
+void ntb_transport_link_up(struct ntb_transport_qp *qp);
+void ntb_transport_link_down(struct ntb_transport_qp *qp);
+bool ntb_transport_link_query(struct ntb_transport_qp *qp);
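
As a rough usage sketch of this new client API: a client fills in ntb_client (and, for data transfer, ntb_queue_handlers) and registers itself. All names and handler bodies below are placeholders; only the receive path is wired up and teardown is elided.

#include <linux/pci.h>
#include <linux/ntb.h>

static void example_rx(struct ntb_transport_qp *qp, void *qp_data,
		       void *data, int len)
{
	/* consume 'data', then repost the buffer with
	 * ntb_transport_rx_enqueue() */
}

static const struct ntb_queue_handlers example_handlers = {
	.rx_handler = example_rx,
};

static int example_probe(struct pci_dev *pdev)
{
	struct ntb_transport_qp *qp;

	qp = ntb_transport_create_queue(NULL, pdev, &example_handlers);
	if (!qp)
		return -EIO;
	ntb_transport_link_up(qp);
	return 0;	/* freeing the queue on remove is elided here */
}

static void example_remove(struct pci_dev *pdev)
{
}

static struct ntb_client example_client = {
	.driver.name	= "example_ntb_client",
	.probe		= example_probe,
	.remove		= example_remove,
};
/* Module init/exit would pair ntb_register_client(&example_client)
 * with ntb_unregister_client(&example_client). */
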
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
new file mode 100644
index 0000000..023430e
--- /dev/null
+++ b/include/linux/vmw_vmci_api.h
@@ -0,0 +1,82 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef __VMW_VMCI_API_H__
+#define __VMW_VMCI_API_H__
+
+#include <linux/uidgid.h>
+#include <linux/vmw_vmci_defs.h>
+
+#undef  VMCI_KERNEL_API_VERSION
+#define VMCI_KERNEL_API_VERSION_1 1
+#define VMCI_KERNEL_API_VERSION_2 2
+#define VMCI_KERNEL_API_VERSION   VMCI_KERNEL_API_VERSION_2
+
+typedef void (vmci_device_shutdown_fn) (void *device_registration,
+					void *user_data);
+
+int vmci_datagram_create_handle(u32 resource_id, u32 flags,
+				vmci_datagram_recv_cb recv_cb,
+				void *client_data,
+				struct vmci_handle *out_handle);
+int vmci_datagram_create_handle_priv(u32 resource_id, u32 flags, u32 priv_flags,
+				     vmci_datagram_recv_cb recv_cb,
+				     void *client_data,
+				     struct vmci_handle *out_handle);
+int vmci_datagram_destroy_handle(struct vmci_handle handle);
+int vmci_datagram_send(struct vmci_datagram *msg);
+int vmci_doorbell_create(struct vmci_handle *handle, u32 flags,
+			 u32 priv_flags,
+			 vmci_callback notify_cb, void *client_data);
+int vmci_doorbell_destroy(struct vmci_handle handle);
+int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
+u32 vmci_get_context_id(void);
+bool vmci_is_context_owner(u32 context_id, kuid_t uid);
+
+int vmci_event_subscribe(u32 event,
+			 vmci_event_cb callback, void *callback_data,
+			 u32 *subid);
+int vmci_event_unsubscribe(u32 subid);
+u32 vmci_context_get_priv_flags(u32 context_id);
+int vmci_qpair_alloc(struct vmci_qp **qpair,
+		     struct vmci_handle *handle,
+		     u64 produce_qsize,
+		     u64 consume_qsize,
+		     u32 peer, u32 flags, u32 priv_flags);
+int vmci_qpair_detach(struct vmci_qp **qpair);
+int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
+				   u64 *producer_tail,
+				   u64 *consumer_head);
+int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
+				   u64 *consumer_tail,
+				   u64 *producer_head);
+s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair);
+s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair);
+s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair);
+s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair);
+ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
+			   const void *buf, size_t buf_size, int mode);
+ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
+			   void *buf, size_t buf_size, int mode);
+ssize_t vmci_qpair_peek(struct vmci_qp *qpair, void *buf, size_t buf_size,
+			int mode);
+ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
+			  void *iov, size_t iov_size, int mode);
+ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
+			  void *iov, size_t iov_size, int mode);
+ssize_t vmci_qpair_peekv(struct vmci_qp *qpair, void *iov, size_t iov_size,
+			 int mode);
+
+#endif /* !__VMW_VMCI_API_H__ */
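
A hedged sketch of the queue-pair half of this API: allocate a pair towards a peer context and push one buffer through it. The 64 KiB sizes, the zero flags/mode and the minimal error handling are illustrative choices only.

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static int example_qpair_send(u32 peer_cid, const void *buf, size_t len)
{
	struct vmci_handle handle = VMCI_INVALID_HANDLE;
	struct vmci_qp *qpair;
	ssize_t written;
	int err;

	err = vmci_qpair_alloc(&qpair, &handle, 64 * 1024, 64 * 1024,
			       peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
	if (err < VMCI_SUCCESS)
		return err;

	written = vmci_qpair_enqueue(qpair, buf, len, 0);

	vmci_qpair_detach(&qpair);
	return written < 0 ? (int)written : 0;
}
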
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
new file mode 100644
index 0000000..65ac54c
--- /dev/null
+++ b/include/linux/vmw_vmci_defs.h
@@ -0,0 +1,880 @@
+/*
+ * VMware VMCI Driver
+ *
+ * Copyright (C) 2012 VMware, Inc. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation version 2 and no later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+
+#ifndef _VMW_VMCI_DEF_H_
+#define _VMW_VMCI_DEF_H_
+
+#include <linux/atomic.h>
+
+/* Register offsets. */
+#define VMCI_STATUS_ADDR      0x00
+#define VMCI_CONTROL_ADDR     0x04
+#define VMCI_ICR_ADDR	      0x08
+#define VMCI_IMR_ADDR         0x0c
+#define VMCI_DATA_OUT_ADDR    0x10
+#define VMCI_DATA_IN_ADDR     0x14
+#define VMCI_CAPS_ADDR        0x18
+#define VMCI_RESULT_LOW_ADDR  0x1c
+#define VMCI_RESULT_HIGH_ADDR 0x20
+
+/* Max number of devices. */
+#define VMCI_MAX_DEVICES 1
+
+/* Status register bits. */
+#define VMCI_STATUS_INT_ON     0x1
+
+/* Control register bits. */
+#define VMCI_CONTROL_RESET        0x1
+#define VMCI_CONTROL_INT_ENABLE   0x2
+#define VMCI_CONTROL_INT_DISABLE  0x4
+
+/* Capabilities register bits. */
+#define VMCI_CAPS_HYPERCALL     0x1
+#define VMCI_CAPS_GUESTCALL     0x2
+#define VMCI_CAPS_DATAGRAM      0x4
+#define VMCI_CAPS_NOTIFICATIONS 0x8
+
+/* Interrupt Cause register bits. */
+#define VMCI_ICR_DATAGRAM      0x1
+#define VMCI_ICR_NOTIFICATION  0x2
+
+/* Interrupt Mask register bits. */
+#define VMCI_IMR_DATAGRAM      0x1
+#define VMCI_IMR_NOTIFICATION  0x2
+
+/* Interrupt type. */
+enum {
+	VMCI_INTR_TYPE_INTX = 0,
+	VMCI_INTR_TYPE_MSI = 1,
+	VMCI_INTR_TYPE_MSIX = 2,
+};
+
+/* Maximum MSI/MSI-X interrupt vectors in the device. */
+#define VMCI_MAX_INTRS 2
+
+/*
+ * Supported interrupt vectors.  There is one for each ICR value above,
+ * but here they indicate the position in the vector array/message ID.
+ */
+enum {
+	VMCI_INTR_DATAGRAM = 0,
+	VMCI_INTR_NOTIFICATION = 1,
+};
+
+/*
+ * A single VMCI device has an upper limit of 128MB on the amount of
+ * memory that can be used for queue pairs.
+ */
+#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+
+/*
+ * Queues with pre-mapped data pages must be small, so that we don't pin
+ * too much kernel memory (especially on vmkernel).  We limit a queuepair to
+ * 32 KB, or 16 KB per queue for symmetrical pairs.
+ */
+#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
+
+/*
+ * We have a fixed set of resource IDs available in the VMX.
+ * This allows us to have a very simple implementation since we statically
+ * know how many will create datagram handles. If a new caller arrives and
+ * we have run out of slots we can manually increment the maximum size of
+ * available resource IDs.
+ *
+ * VMCI reserved hypervisor datagram resource IDs.
+ */
+enum {
+	VMCI_RESOURCES_QUERY = 0,
+	VMCI_GET_CONTEXT_ID = 1,
+	VMCI_SET_NOTIFY_BITMAP = 2,
+	VMCI_DOORBELL_LINK = 3,
+	VMCI_DOORBELL_UNLINK = 4,
+	VMCI_DOORBELL_NOTIFY = 5,
+	/*
+	 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
+	 * obsoleted by the removal of VM to VM communication.
+	 */
+	VMCI_DATAGRAM_REQUEST_MAP = 6,
+	VMCI_DATAGRAM_REMOVE_MAP = 7,
+	VMCI_EVENT_SUBSCRIBE = 8,
+	VMCI_EVENT_UNSUBSCRIBE = 9,
+	VMCI_QUEUEPAIR_ALLOC = 10,
+	VMCI_QUEUEPAIR_DETACH = 11,
+
+	/*
+	 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
+	 * WS 7.0/7.1 and ESX 4.1
+	 */
+	VMCI_HGFS_TRANSPORT = 13,
+	VMCI_UNITY_PBRPC_REGISTER = 14,
+	VMCI_RPC_PRIVILEGED = 15,
+	VMCI_RPC_UNPRIVILEGED = 16,
+	VMCI_RESOURCE_MAX = 17,
+};
+
+/*
+ * struct vmci_handle - Ownership information structure
+ * @context:    The VMX context ID.
+ * @resource:   The resource ID (used for locating in resource hash).
+ *
+ * The vmci_handle structure is used to track resources used within
+ * vmw_vmci.
+ */
+struct vmci_handle {
+	u32 context;
+	u32 resource;
+};
+
+#define vmci_make_handle(_cid, _rid) \
+	(struct vmci_handle){ .context = _cid, .resource = _rid }
+
+static inline bool vmci_handle_is_equal(struct vmci_handle h1,
+					struct vmci_handle h2)
+{
+	return h1.context == h2.context && h1.resource == h2.resource;
+}
+
+#define VMCI_INVALID_ID ~0
+static const struct vmci_handle VMCI_INVALID_HANDLE = {
+	.context = VMCI_INVALID_ID,
+	.resource = VMCI_INVALID_ID
+};
+
+static inline bool vmci_handle_is_invalid(struct vmci_handle h)
+{
+	return vmci_handle_is_equal(h, VMCI_INVALID_HANDLE);
+}
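
For illustration, building a handle with vmci_make_handle() and checking it with the helper above; the context and resource ids are arbitrary:

/* Sketch: (3, 42) differs from VMCI_INVALID_HANDLE, so the check returns false
 * and this helper returns true. */
static bool example_handle_ok(void)
{
	struct vmci_handle h = vmci_make_handle(3, 42);

	return !vmci_handle_is_invalid(h);
}
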
+
+/*
+ * The below defines can be used to send anonymous requests.
+ * This also indicates that no response is expected.
+ */
+#define VMCI_ANON_SRC_CONTEXT_ID   VMCI_INVALID_ID
+#define VMCI_ANON_SRC_RESOURCE_ID  VMCI_INVALID_ID
+static const struct vmci_handle VMCI_ANON_SRC_HANDLE = {
+	.context = VMCI_ANON_SRC_CONTEXT_ID,
+	.resource = VMCI_ANON_SRC_RESOURCE_ID
+};
+
+/* The lowest 16 context ids are reserved for internal use. */
+#define VMCI_RESERVED_CID_LIMIT ((u32) 16)
+
+/*
+ * Hypervisor context id, used for calling into hypervisor
+ * supplied services from the VM.
+ */
+#define VMCI_HYPERVISOR_CONTEXT_ID 0
+
+/*
+ * Well-known context id, a logical context that contains a set of
+ * well-known services. This context ID is now obsolete.
+ */
+#define VMCI_WELL_KNOWN_CONTEXT_ID 1
+
+/*
+ * Context ID used by host endpoints.
+ */
+#define VMCI_HOST_CONTEXT_ID  2
+
+#define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != (_cid) &&		\
+				  (_cid) > VMCI_HOST_CONTEXT_ID)
+
+/*
+ * The VMCI_CONTEXT_RESOURCE_ID is used together with vmci_make_handle to make
+ * handles that refer to a specific context.
+ */
+#define VMCI_CONTEXT_RESOURCE_ID 0
+
+/*
+ * VMCI error codes.
+ */
+enum {
+	VMCI_SUCCESS_QUEUEPAIR_ATTACH	= 5,
+	VMCI_SUCCESS_QUEUEPAIR_CREATE	= 4,
+	VMCI_SUCCESS_LAST_DETACH	= 3,
+	VMCI_SUCCESS_ACCESS_GRANTED	= 2,
+	VMCI_SUCCESS_ENTRY_DEAD		= 1,
+	VMCI_SUCCESS			 = 0,
+	VMCI_ERROR_INVALID_RESOURCE	 = (-1),
+	VMCI_ERROR_INVALID_ARGS		 = (-2),
+	VMCI_ERROR_NO_MEM		 = (-3),
+	VMCI_ERROR_DATAGRAM_FAILED	 = (-4),
+	VMCI_ERROR_MORE_DATA		 = (-5),
+	VMCI_ERROR_NO_MORE_DATAGRAMS	 = (-6),
+	VMCI_ERROR_NO_ACCESS		 = (-7),
+	VMCI_ERROR_NO_HANDLE		 = (-8),
+	VMCI_ERROR_DUPLICATE_ENTRY	 = (-9),
+	VMCI_ERROR_DST_UNREACHABLE	 = (-10),
+	VMCI_ERROR_PAYLOAD_TOO_LARGE	 = (-11),
+	VMCI_ERROR_INVALID_PRIV		 = (-12),
+	VMCI_ERROR_GENERIC		 = (-13),
+	VMCI_ERROR_PAGE_ALREADY_SHARED	 = (-14),
+	VMCI_ERROR_CANNOT_SHARE_PAGE	 = (-15),
+	VMCI_ERROR_CANNOT_UNSHARE_PAGE	 = (-16),
+	VMCI_ERROR_NO_PROCESS		 = (-17),
+	VMCI_ERROR_NO_DATAGRAM		 = (-18),
+	VMCI_ERROR_NO_RESOURCES		 = (-19),
+	VMCI_ERROR_UNAVAILABLE		 = (-20),
+	VMCI_ERROR_NOT_FOUND		 = (-21),
+	VMCI_ERROR_ALREADY_EXISTS	 = (-22),
+	VMCI_ERROR_NOT_PAGE_ALIGNED	 = (-23),
+	VMCI_ERROR_INVALID_SIZE		 = (-24),
+	VMCI_ERROR_REGION_ALREADY_SHARED = (-25),
+	VMCI_ERROR_TIMEOUT		 = (-26),
+	VMCI_ERROR_DATAGRAM_INCOMPLETE	 = (-27),
+	VMCI_ERROR_INCORRECT_IRQL	 = (-28),
+	VMCI_ERROR_EVENT_UNKNOWN	 = (-29),
+	VMCI_ERROR_OBSOLETE		 = (-30),
+	VMCI_ERROR_QUEUEPAIR_MISMATCH	 = (-31),
+	VMCI_ERROR_QUEUEPAIR_NOTSET	 = (-32),
+	VMCI_ERROR_QUEUEPAIR_NOTOWNER	 = (-33),
+	VMCI_ERROR_QUEUEPAIR_NOTATTACHED = (-34),
+	VMCI_ERROR_QUEUEPAIR_NOSPACE	 = (-35),
+	VMCI_ERROR_QUEUEPAIR_NODATA	 = (-36),
+	VMCI_ERROR_BUSMEM_INVALIDATION	 = (-37),
+	VMCI_ERROR_MODULE_NOT_LOADED	 = (-38),
+	VMCI_ERROR_DEVICE_NOT_FOUND	 = (-39),
+	VMCI_ERROR_QUEUEPAIR_NOT_READY	 = (-40),
+	VMCI_ERROR_WOULD_BLOCK		 = (-41),
+
+	/* VMCI clients should return error code within this range */
+	VMCI_ERROR_CLIENT_MIN		 = (-500),
+	VMCI_ERROR_CLIENT_MAX		 = (-550),
+
+	/* Internal error codes. */
+	VMCI_SHAREDMEM_ERROR_BAD_CONTEXT = (-1000),
+};
+
+/* VMCI reserved events. */
+enum {
+	/* Only applicable to guest endpoints */
+	VMCI_EVENT_CTX_ID_UPDATE  = 0,
+
+	/* Applicable to guest and host */
+	VMCI_EVENT_CTX_REMOVED	  = 1,
+
+	/* Only applicable to guest endpoints */
+	VMCI_EVENT_QP_RESUMED	  = 2,
+
+	/* Applicable to guest and host */
+	VMCI_EVENT_QP_PEER_ATTACH = 3,
+
+	/* Applicable to guest and host */
+	VMCI_EVENT_QP_PEER_DETACH = 4,
+
+	/*
+	 * Applicable to VMX and vmk.  On vmk,
+	 * this event has the Context payload type.
+	 */
+	VMCI_EVENT_MEM_ACCESS_ON  = 5,
+
+	/*
+	 * Applicable to VMX and vmk.  Same as
+	 * above for the payload type.
+	 */
+	VMCI_EVENT_MEM_ACCESS_OFF = 6,
+	VMCI_EVENT_MAX		  = 7,
+};
+
+/*
+ * Of the above events, a few are reserved for use in the VMX, and
+ * other endpoints (guest and host kernel) should not use them. For
+ * the rest of the events, we allow both host and guest endpoints to
+ * subscribe to them, to maintain the same API for host and guest
+ * endpoints.
+ */
+#define VMCI_EVENT_VALID_VMX(_event) ((_event) == VMCI_EVENT_MEM_ACCESS_ON || \
+				      (_event) == VMCI_EVENT_MEM_ACCESS_OFF)
+
+#define VMCI_EVENT_VALID(_event) ((_event) < VMCI_EVENT_MAX &&		\
+				  !VMCI_EVENT_VALID_VMX(_event))
+
+/* Reserved guest datagram resource ids. */
+#define VMCI_EVENT_HANDLER 0
+
+/*
+ * VMCI coarse-grained privileges (per context or host
+ * process/endpoint). An entity with the restricted flag is only
+ * allowed to interact with the hypervisor and trusted entities.
+ */
+enum {
+	VMCI_NO_PRIVILEGE_FLAGS = 0,
+	VMCI_PRIVILEGE_FLAG_RESTRICTED = 1,
+	VMCI_PRIVILEGE_FLAG_TRUSTED = 2,
+	VMCI_PRIVILEGE_ALL_FLAGS = (VMCI_PRIVILEGE_FLAG_RESTRICTED |
+				    VMCI_PRIVILEGE_FLAG_TRUSTED),
+	VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS = VMCI_NO_PRIVILEGE_FLAGS,
+	VMCI_LEAST_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_RESTRICTED,
+	VMCI_MAX_PRIVILEGE_FLAGS = VMCI_PRIVILEGE_FLAG_TRUSTED,
+};
+
+/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
+#define VMCI_RESERVED_RESOURCE_ID_MAX 1023
+
+/*
+ * Driver version.
+ *
+ * Increment major version when you make an incompatible change.
+ * Compatibility goes both ways (old driver with new executable
+ * as well as new driver with old executable).
+ */
+
+/* Never change VMCI_VERSION_SHIFT_WIDTH */
+#define VMCI_VERSION_SHIFT_WIDTH 16
+#define VMCI_MAKE_VERSION(_major, _minor)			\
+	((_major) << VMCI_VERSION_SHIFT_WIDTH | (u16) (_minor))
+
+#define VMCI_VERSION_MAJOR(v)  ((u32) (v) >> VMCI_VERSION_SHIFT_WIDTH)
+#define VMCI_VERSION_MINOR(v)  ((u16) (v))
+
+/*
+ * VMCI_VERSION is always the current version.  Subsequently listed
+ * versions are ways of detecting previous versions of the connecting
+ * application (i.e., VMX).
+ *
+ * VMCI_VERSION_NOVMVM: This version removed support for VM to VM
+ * communication.
+ *
+ * VMCI_VERSION_NOTIFY: This version introduced doorbell notification
+ * support.
+ *
+ * VMCI_VERSION_HOSTQP: This version introduced host end point support
+ * for hosted products.
+ *
+ * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of
+ * support for host end-points.
+ *
+ * VMCI_VERSION_PREVERS2: This fictional version number is intended to
+ * represent the version of a VMX which doesn't call into the driver
+ * with ioctl VERSION2 and thus doesn't establish its version with the
+ * driver.
+ */
+
+#define VMCI_VERSION                VMCI_VERSION_NOVMVM
+#define VMCI_VERSION_NOVMVM         VMCI_MAKE_VERSION(11, 0)
+#define VMCI_VERSION_NOTIFY         VMCI_MAKE_VERSION(10, 0)
+#define VMCI_VERSION_HOSTQP         VMCI_MAKE_VERSION(9, 0)
+#define VMCI_VERSION_PREHOSTQP      VMCI_MAKE_VERSION(8, 0)
+#define VMCI_VERSION_PREVERS2       VMCI_MAKE_VERSION(1, 0)
+
+#define VMCI_SOCKETS_MAKE_VERSION(_p)					\
+	((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2]))
+
+/*
+ * The VMCI IOCTLs.  We use identity code 7, as noted in ioctl-number.h, and
+ * we start at sequence 9f.  This gives us the same values that our shipping
+ * products use, starting at 1951, provided we leave out the direction and
+ * structure size.  Note that VMMon occupies the block following us, starting
+ * at 2001.
+ */
+#define IOCTL_VMCI_VERSION			_IO(7, 0x9f)	/* 1951 */
+#define IOCTL_VMCI_INIT_CONTEXT			_IO(7, 0xa0)
+#define IOCTL_VMCI_QUEUEPAIR_SETVA		_IO(7, 0xa4)
+#define IOCTL_VMCI_NOTIFY_RESOURCE		_IO(7, 0xa5)
+#define IOCTL_VMCI_NOTIFICATIONS_RECEIVE	_IO(7, 0xa6)
+#define IOCTL_VMCI_VERSION2			_IO(7, 0xa7)
+#define IOCTL_VMCI_QUEUEPAIR_ALLOC		_IO(7, 0xa8)
+#define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE	_IO(7, 0xa9)
+#define IOCTL_VMCI_QUEUEPAIR_DETACH		_IO(7, 0xaa)
+#define IOCTL_VMCI_DATAGRAM_SEND		_IO(7, 0xab)
+#define IOCTL_VMCI_DATAGRAM_RECEIVE		_IO(7, 0xac)
+#define IOCTL_VMCI_CTX_ADD_NOTIFICATION		_IO(7, 0xaf)
+#define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION	_IO(7, 0xb0)
+#define IOCTL_VMCI_CTX_GET_CPT_STATE		_IO(7, 0xb1)
+#define IOCTL_VMCI_CTX_SET_CPT_STATE		_IO(7, 0xb2)
+#define IOCTL_VMCI_GET_CONTEXT_ID		_IO(7, 0xb3)
+#define IOCTL_VMCI_SOCKETS_VERSION		_IO(7, 0xb4)
+#define IOCTL_VMCI_SOCKETS_GET_AF_VALUE		_IO(7, 0xb8)
+#define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID	_IO(7, 0xb9)
+#define IOCTL_VMCI_SET_NOTIFY			_IO(7, 0xcb)	/* 1995 */
+/*IOCTL_VMMON_START				_IO(7, 0xd1)*/	/* 2001 */
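
To make the numbering comment concrete (with the direction and size bits left out, _IO(type, nr) is just (type << 8) | nr):

	IOCTL_VMCI_VERSION    == _IO(7, 0x9f) == (7 << 8) | 0x9f == 1951
	IOCTL_VMCI_SET_NOTIFY == _IO(7, 0xcb) == (7 << 8) | 0xcb == 1995
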
+
+/*
+ * struct vmci_queue_header - VMCI Queue Header information.
+ *
+ * A Queue cannot stand by itself as designed.  Each Queue's header
+ * contains a pointer into itself (the producer_tail) and into its peer
+ * (consumer_head).  The reason for the separation is one of
+ * accessibility: Each end-point can modify two things: where the next
+ * location to enqueue is within its produce_q (producer_tail); and
+ * where the next dequeue location is in its consume_q (consumer_head).
+ *
+ * An end-point cannot modify the pointers of its peer (guest to
+ * guest; NOTE that in the host both queue headers are mapped r/w).
+ * But, each end-point needs read access to both Queue header
+ * structures in order to determine how much space is used (or left)
+ * in the Queue.  This is because for an end-point to know how full
+ * its produce_q is, it needs to use the consumer_head that points into
+ * the produce_q but -that- consumer_head is in the Queue header for
+ * that end-point's consume_q.
+ *
+ * Thoroughly confused?  Sorry.
+ *
+ * producer_tail: the point to enqueue new entrants.  When you approach
+ * a line in a store, for example, you walk up to the tail.
+ *
+ * consumer_head: the point in the queue from which the next element is
+ * dequeued.  In other words, who is next in line is he who is at the
+ * head of the line.
+ *
+ * Also, producer_tail points to an empty byte in the Queue, whereas
+ * consumer_head points to a valid byte of data (unless producer_tail ==
+ * consumer_head in which case consumer_head does not point to a valid
+ * byte of data).
+ *
+ * For a queue of buffer 'size' bytes, the tail and head pointers will be in
+ * the range [0, size-1].
+ *
+ * If produce_q_header->producer_tail == consume_q_header->consumer_head
+ * then the produce_q is empty.
+ */
+struct vmci_queue_header {
+	/* All fields are 64bit and aligned. */
+	struct vmci_handle handle;	/* Identifier. */
+	atomic64_t producer_tail;	/* Offset in this queue. */
+	atomic64_t consumer_head;	/* Offset in peer queue. */
+};
+
+/*
+ * struct vmci_datagram - Base struct for vmci datagrams.
+ * @dst:        A vmci_handle that tracks the destination of the datagram.
+ * @src:        A vmci_handle that tracks the source of the datagram.
+ * @payload_size:       The size of the payload.
+ *
+ * vmci_datagram structs are used when sending vmci datagrams.  They include
+ * the necessary source and destination information to properly route
+ * the information along with the size of the package.
+ */
+struct vmci_datagram {
+	struct vmci_handle dst;
+	struct vmci_handle src;
+	u64 payload_size;
+};
+
+/*
+ * Second flag is for creating a well-known handle instead of a per context
+ * handle.  Next flag is for deferring datagram delivery, so that the
+ * datagram callback is invoked in a delayed context (not interrupt context).
+ */
+#define VMCI_FLAG_DG_NONE          0
+#define VMCI_FLAG_WELLKNOWN_DG_HND 0x1
+#define VMCI_FLAG_ANYCID_DG_HND    0x2
+#define VMCI_FLAG_DG_DELAYED_CB    0x4
+
+/*
+ * Maximum supported size of a VMCI datagram for routable datagrams.
+ * Datagrams going to the hypervisor are allowed to be larger.
+ */
+#define VMCI_MAX_DG_SIZE (17 * 4096)
+#define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - \
+				  sizeof(struct vmci_datagram))
+#define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) +			\
+				      sizeof(struct vmci_datagram))
+#define VMCI_DG_HEADERSIZE sizeof(struct vmci_datagram)
+#define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payload_size)
+#define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (~((size_t) 0x7)))
+#define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2)
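
For example, with the 24-byte struct vmci_datagram header (two 8-byte handles plus a u64 payload_size), a datagram carrying 100 bytes of payload works out to:

	VMCI_DG_SIZE(dg)         == 24 + 100       == 124
	VMCI_DG_SIZE_ALIGNED(dg) == (124 + 7) & ~7 == 128
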
+
+struct vmci_event_payload_qp {
+	struct vmci_handle handle;  /* queue_pair handle. */
+	u32 peer_id;		    /* Context id of attaching/detaching VM. */
+	u32 _pad;
+};
+
+/* Flags for VMCI queue_pair API. */
+enum {
+	/* Fail alloc if QP not created by peer. */
+	VMCI_QPFLAG_ATTACH_ONLY = 1 << 0,
+
+	/* Only allow attaches from local context. */
+	VMCI_QPFLAG_LOCAL = 1 << 1,
+
+	/* Host won't block when guest is quiesced. */
+	VMCI_QPFLAG_NONBLOCK = 1 << 2,
+
+	/* Pin data pages in ESX.  Used with NONBLOCK */
+	VMCI_QPFLAG_PINNED = 1 << 3,
+
+	/* Update the following flag when adding new flags. */
+	VMCI_QP_ALL_FLAGS = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL |
+			     VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
+
+	/* Convenience flags */
+	VMCI_QP_ASYMM = (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED),
+	VMCI_QP_ASYMM_PEER = (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM),
+};
+
+/*
+ * We allow at least 1024 more event datagrams from the hypervisor past the
+ * normally allowed datagrams pending for a given context.  We define this
+ * limit on event datagrams from the hypervisor to guard against DoS attack
+ * from a malicious VM which could repeatedly attach to and detach from a queue
+ * pair, causing events to be queued at the destination VM.  However, the rate
+ * at which such events can be generated is small since it requires a VM exit
+ * and handling of queue pair attach/detach call at the hypervisor.  Event
+ * datagrams may be queued up at the destination VM if it has interrupts
+ * disabled or if it is not draining events for some other reason.  1024
+ * datagrams is a grossly conservative estimate of the time for which
+ * interrupts may be disabled in the destination VM, but at the same time does
+ * not exacerbate the memory pressure problem on the host by much (size of each
+ * event datagram is small).
+ */
+#define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE				\
+	(VMCI_MAX_DATAGRAM_QUEUE_SIZE +					\
+	 1024 * (sizeof(struct vmci_datagram) +				\
+		 sizeof(struct vmci_event_data_max)))
+
+/*
+ * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of
+ * hypervisor resources.  Struct size is 16 bytes. All fields in struct are
+ * aligned to their natural alignment.
+ */
+struct vmci_resource_query_hdr {
+	struct vmci_datagram hdr;
+	u32 num_resources;
+	u32 _padding;
+};
+
+/*
+ * Convenience struct for negotiating vectors. Must match layout of
+ * VMCIResourceQueryHdr minus the struct vmci_datagram header.
+ */
+struct vmci_resource_query_msg {
+	u32 num_resources;
+	u32 _padding;
+	u32 resources[1];
+};
+
+/*
+ * The maximum number of resources that can be queried using
+ * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31
+ * bits of a positive return value. Negative values are reserved for
+ * errors.
+ */
+#define VMCI_RESOURCE_QUERY_MAX_NUM 31
+
+/* Maximum size for the VMCI_RESOURCE_QUERY request. */
+#define VMCI_RESOURCE_QUERY_MAX_SIZE				\
+	(sizeof(struct vmci_resource_query_hdr) +		\
+	 sizeof(u32) * VMCI_RESOURCE_QUERY_MAX_NUM)
+
+/*
+ * Struct used for setting the notification bitmap.  All fields in
+ * struct are aligned to their natural alignment.
+ */
+struct vmci_notify_bm_set_msg {
+	struct vmci_datagram hdr;
+	u32 bitmap_ppn;
+	u32 _pad;
+};
+
+/*
+ * Struct used for linking a doorbell handle with an index in the
+ * notify bitmap. All fields in struct are aligned to their natural
+ * alignment.
+ */
+struct vmci_doorbell_link_msg {
+	struct vmci_datagram hdr;
+	struct vmci_handle handle;
+	u64 notify_idx;
+};
+
+/*
+ * Struct used for unlinking a doorbell handle from an index in the
+ * notify bitmap. All fields in struct are aligned to their natural
+ * alignment.
+ */
+struct vmci_doorbell_unlink_msg {
+	struct vmci_datagram hdr;
+	struct vmci_handle handle;
+};
+
+/*
+ * Struct used for generating a notification on a doorbell handle. All
+ * fields in struct are aligned to their natural alignment.
+ */
+struct vmci_doorbell_notify_msg {
+	struct vmci_datagram hdr;
+	struct vmci_handle handle;
+};
+
+/*
+ * This struct is used to contain data for events.  Size of this struct is a
+ * multiple of 8 bytes, and all fields are aligned to their natural alignment.
+ */
+struct vmci_event_data {
+	u32 event;		/* 4 bytes. */
+	u32 _pad;
+	/* Event payload is put here. */
+};
+
+/*
+ * Define the different VMCI_EVENT payload data types here.  All structs must
+ * be a multiple of 8 bytes, and fields must be aligned to their natural
+ * alignment.
+ */
+struct vmci_event_payld_ctx {
+	u32 context_id;	/* 4 bytes. */
+	u32 _pad;
+};
+
+struct vmci_event_payld_qp {
+	struct vmci_handle handle;  /* queue_pair handle. */
+	u32 peer_id;	    /* Context id of attaching/detaching VM. */
+	u32 _pad;
+};
+
+/*
+ * We define the following struct to get the size of the maximum event
+ * data the hypervisor may send to the guest.  If adding a new event
+ * payload type above, add it to the following struct too (inside the
+ * union).
+ */
+struct vmci_event_data_max {
+	struct vmci_event_data event_data;
+	union {
+		struct vmci_event_payld_ctx context_payload;
+		struct vmci_event_payld_qp qp_payload;
+	} ev_data_payload;
+};
+
+/*
+ * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and
+ * VMCI_EVENT_HANDLER messages.  Struct size is 32 bytes.  All fields
+ * in struct are aligned to their natural alignment.
+ */
+struct vmci_event_msg {
+	struct vmci_datagram hdr;
+
+	/* Has event type and payload. */
+	struct vmci_event_data event_data;
+
+	/* Payload gets put here. */
+};
+
+/* Event with context payload. */
+struct vmci_event_ctx {
+	struct vmci_event_msg msg;
+	struct vmci_event_payld_ctx payload;
+};
+
+/* Event with QP payload. */
+struct vmci_event_qp {
+	struct vmci_event_msg msg;
+	struct vmci_event_payld_qp payload;
+};
+
+/*
+ * Structs used for queue_pair alloc and detach messages.  We align fields of
+ * these structs to 64bit boundaries.
+ */
+struct vmci_qp_alloc_msg {
+	struct vmci_datagram hdr;
+	struct vmci_handle handle;
+	u32 peer;
+	u32 flags;
+	u64 produce_size;
+	u64 consume_size;
+	u64 num_ppns;
+
+	/* List of PPNs placed here. */
+};
+
+struct vmci_qp_detach_msg {
+	struct vmci_datagram hdr;
+	struct vmci_handle handle;
+};
+
+/* VMCI Doorbell API. */
+#define VMCI_FLAG_DELAYED_CB 0x01
+
+typedef void (*vmci_callback) (void *client_data);
+
+/*
+ * struct vmci_qp - A vmw_vmci queue pair handle.
+ *
+ * This structure is used as a handle to a queue pair created by
+ * VMCI.  It is intentionally left opaque to clients.
+ */
+struct vmci_qp;
+
+/* Callback needed for correctly waiting on events. */
+typedef int (*vmci_datagram_recv_cb) (void *client_data,
+				      struct vmci_datagram *msg);
+
+/* VMCI Event API. */
+typedef void (*vmci_event_cb) (u32 sub_id, const struct vmci_event_data *ed,
+			       void *client_data);
+
+/*
+ * We use the following inline function to access the payload data
+ * associated with an event data.
+ */
+static inline const void *
+vmci_event_data_const_payload(const struct vmci_event_data *ev_data)
+{
+	return (const char *)ev_data + sizeof(*ev_data);
+}
+
+static inline void *vmci_event_data_payload(struct vmci_event_data *ev_data)
+{
+	return (void *)vmci_event_data_const_payload(ev_data);
+}
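
A hedged sketch of a subscriber callback using these accessors to pick the queue-pair payload out of an event; the callback name is made up, and registering it via vmci_event_subscribe() is only implied:

static void example_qp_event_cb(u32 sub_id, const struct vmci_event_data *ed,
				void *client_data)
{
	const struct vmci_event_payld_qp *qp_payload;

	if (ed->event != VMCI_EVENT_QP_PEER_ATTACH &&
	    ed->event != VMCI_EVENT_QP_PEER_DETACH)
		return;

	qp_payload = vmci_event_data_const_payload(ed);
	/* qp_payload->handle and qp_payload->peer_id identify the pair/peer */
}
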
+
+/*
+ * Helper to add a given offset to a head or tail pointer. Wraps the
+ * value of the pointer around the max size of the queue.
+ */
+static inline void vmci_qp_add_pointer(atomic64_t *var,
+				       size_t add,
+				       u64 size)
+{
+	u64 new_val = atomic64_read(var);
+
+	if (new_val >= size - add)
+		new_val -= size;
+
+	new_val += add;
+
+	atomic64_set(var, new_val);
+}
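
A worked example of the wrap-around above; the intermediate u64 underflow is deliberate and cancels out:

	old tail = 1020, size = 1024, add = 16
	1020 >= 1024 - 16  ->  subtract size first
	new tail = 1020 - 1024 + 16 = 12   (== (1020 + 16) % 1024)
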
+
+/*
+ * Helper routine to get the Producer Tail from the supplied queue.
+ */
+static inline u64
+vmci_q_header_producer_tail(const struct vmci_queue_header *q_header)
+{
+	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
+	return atomic64_read(&qh->producer_tail);
+}
+
+/*
+ * Helper routine to get the Consumer Head from the supplied queue.
+ */
+static inline u64
+vmci_q_header_consumer_head(const struct vmci_queue_header *q_header)
+{
+	struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
+	return atomic64_read(&qh->consumer_head);
+}
+
+/*
+ * Helper routine to increment the Producer Tail.  Fundamentally,
+ * vmci_qp_add_pointer() is used to manipulate the tail itself.
+ */
+static inline void
+vmci_q_header_add_producer_tail(struct vmci_queue_header *q_header,
+				size_t add,
+				u64 queue_size)
+{
+	vmci_qp_add_pointer(&q_header->producer_tail, add, queue_size);
+}
+
+/*
+ * Helper routine to increment the Consumer Head.  Fundamentally,
+ * vmci_qp_add_pointer() is used to manipulate the head itself.
+ */
+static inline void
+vmci_q_header_add_consumer_head(struct vmci_queue_header *q_header,
+				size_t add,
+				u64 queue_size)
+{
+	vmci_qp_add_pointer(&q_header->consumer_head, add, queue_size);
+}
+
+/*
+ * Helper routine for getting the head and the tail pointer for a queue.
+ * Both the VMCIQueues are needed to get both the pointers for one queue.
+ */
+static inline void
+vmci_q_header_get_pointers(const struct vmci_queue_header *produce_q_header,
+			   const struct vmci_queue_header *consume_q_header,
+			   u64 *producer_tail,
+			   u64 *consumer_head)
+{
+	if (producer_tail)
+		*producer_tail = vmci_q_header_producer_tail(produce_q_header);
+
+	if (consumer_head)
+		*consumer_head = vmci_q_header_consumer_head(consume_q_header);
+}
+
+static inline void vmci_q_header_init(struct vmci_queue_header *q_header,
+				      const struct vmci_handle handle)
+{
+	q_header->handle = handle;
+	atomic64_set(&q_header->producer_tail, 0);
+	atomic64_set(&q_header->consumer_head, 0);
+}
+
+/*
+ * Finds available free space in a produce queue to enqueue more
+ * data or reports an error if queue pair corruption is detected.
+ */
+static s64
+vmci_q_header_free_space(const struct vmci_queue_header *produce_q_header,
+			 const struct vmci_queue_header *consume_q_header,
+			 const u64 produce_q_size)
+{
+	u64 tail;
+	u64 head;
+	u64 free_space;
+
+	tail = vmci_q_header_producer_tail(produce_q_header);
+	head = vmci_q_header_consumer_head(consume_q_header);
+
+	if (tail >= produce_q_size || head >= produce_q_size)
+		return VMCI_ERROR_INVALID_SIZE;
+
+	/*
+	 * Deduct 1 to avoid tail becoming equal to head which causes
+	 * ambiguity. If head and tail are equal it means that the
+	 * queue is empty.
+	 */
+	if (tail >= head)
+		free_space = produce_q_size - (tail - head) - 1;
+	else
+		free_space = head - tail - 1;
+
+	return free_space;
+}
+
+/*
+ * vmci_q_header_free_space() does all the heavy lifting of
+ * determining the number of free bytes in a Queue.  This routine,
+ * then subtracts that size from the full size of the Queue so
+ * the caller knows how many bytes are ready to be dequeued.
+ * Results:
+ * On success, available data size in bytes (up to MAX_INT64).
+ * On failure, appropriate error code.
+ */
+static inline s64
+vmci_q_header_buf_ready(const struct vmci_queue_header *consume_q_header,
+			const struct vmci_queue_header *produce_q_header,
+			const u64 consume_q_size)
+{
+	s64 free_space;
+
+	free_space = vmci_q_header_free_space(consume_q_header,
+					      produce_q_header, consume_q_size);
+	if (free_space < VMCI_SUCCESS)
+		return free_space;
+
+	return consume_q_size - free_space - 1;
+}
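
A worked example tying the two helpers together (queue size 1024, producer_tail 300, peer consumer_head 100):

	vmci_q_header_free_space() == 1024 - (300 - 100) - 1 == 823
	vmci_q_header_buf_ready()  == 1024 - 823 - 1         == 200

i.e. 200 bytes are queued for the consumer and 823 more can be produced before the tail would run into the head.
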
+
+
+#endif /* _VMW_VMCI_DEF_H_ */
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index d25a469..c800ea4 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -97,7 +97,7 @@
  * The location of the interface configuration file.
  */
 
-#define KVP_CONFIG_LOC	"/var/opt/"
+#define KVP_CONFIG_LOC	"/var/lib/hyperv"
 
 #define MAX_FILE_NAME 100
 #define ENTRIES_PER_BLOCK 50
@@ -151,7 +151,7 @@
 	 */
 	kvp_acquire_lock(pool);
 
-	filep = fopen(kvp_file_info[pool].fname, "w");
+	filep = fopen(kvp_file_info[pool].fname, "we");
 	if (!filep) {
 		kvp_release_lock(pool);
 		syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
@@ -182,7 +182,7 @@
 
 	kvp_acquire_lock(pool);
 
-	filep = fopen(kvp_file_info[pool].fname, "r");
+	filep = fopen(kvp_file_info[pool].fname, "re");
 	if (!filep) {
 		kvp_release_lock(pool);
 		syslog(LOG_ERR, "Failed to open file, pool: %d", pool);
@@ -234,9 +234,9 @@
 	int i;
 	int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
 
-	if (access("/var/opt/hyperv", F_OK)) {
-		if (mkdir("/var/opt/hyperv", S_IRUSR | S_IWUSR | S_IROTH)) {
-			syslog(LOG_ERR, " Failed to create /var/opt/hyperv");
+	if (access(KVP_CONFIG_LOC, F_OK)) {
+		if (mkdir(KVP_CONFIG_LOC, 0755 /* rwxr-xr-x */)) {
+			syslog(LOG_ERR, " Failed to create %s", KVP_CONFIG_LOC);
 			exit(EXIT_FAILURE);
 		}
 	}
@@ -245,14 +245,14 @@
 		fname = kvp_file_info[i].fname;
 		records_read = 0;
 		num_blocks = 1;
-		sprintf(fname, "/var/opt/hyperv/.kvp_pool_%d", i);
-		fd = open(fname, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR | S_IROTH);
+		sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
+		fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
 
 		if (fd == -1)
 			return 1;
 
 
-		filep = fopen(fname, "r");
+		filep = fopen(fname, "re");
 		if (!filep)
 			return 1;
 
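The "e" added to the fopen() modes in these hunks is glibc's close-on-exec flag (the stdio counterpart of the O_CLOEXEC now passed to open()), so the pool files are not leaked into processes the daemon forks. A minimal userspace illustration:

#include <stdio.h>

/* "re" = read-only + close-on-exec; the 'e' is a glibc extension. */
FILE *example_open_pool(const char *fname)
{
	return fopen(fname, "re");
}
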
@@ -1162,16 +1162,13 @@
 				snprintf(str, sizeof(str), "%s", "DNS");
 				break;
 			}
-			if (i != 0) {
-				if (type != DNS) {
-					snprintf(sub_str, sizeof(sub_str),
-						"_%d", i++);
-				} else {
-					snprintf(sub_str, sizeof(sub_str),
-						"%d", ++i);
-				}
-			} else if (type == DNS) {
+
+			if (type == DNS) {
 				snprintf(sub_str, sizeof(sub_str), "%d", ++i);
+			} else if (type == GATEWAY && i == 0) {
+				++i;
+			} else {
+				snprintf(sub_str, sizeof(sub_str), "%d", i++);
 			}
 
 
@@ -1191,17 +1188,13 @@
 				snprintf(str, sizeof(str), "%s",  "DNS");
 				break;
 			}
-			if ((j != 0) || (type == DNS)) {
-				if (type != DNS) {
-					snprintf(sub_str, sizeof(sub_str),
-						"_%d", j++);
-				} else {
-					snprintf(sub_str, sizeof(sub_str),
-						"%d", ++i);
-				}
-			} else if (type == DNS) {
-				snprintf(sub_str, sizeof(sub_str),
-					"%d", ++i);
+
+			if (type == DNS) {
+				snprintf(sub_str, sizeof(sub_str), "%d", ++i);
+			} else if (j == 0) {
+				++j;
+			} else {
+				snprintf(sub_str, sizeof(sub_str), "_%d", j++);
 			}
 		} else {
 			return  HV_INVALIDARG;
@@ -1244,18 +1237,19 @@
 	 * Here is the format of the ip configuration file:
 	 *
 	 * HWADDR=macaddr
-	 * IF_NAME=interface name
-	 * DHCP=yes (This is optional; if yes, DHCP is configured)
+	 * DEVICE=interface name
+	 * BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured
+	 *                       or "none" if no boot-time protocol should be used)
 	 *
-	 * IPADDR=ipaddr1
-	 * IPADDR_1=ipaddr2
-	 * IPADDR_x=ipaddry (where y = x + 1)
+	 * IPADDR0=ipaddr1
+	 * IPADDR1=ipaddr2
+	 * IPADDRx=ipaddry (where y = x + 1)
 	 *
-	 * NETMASK=netmask1
-	 * NETMASK_x=netmasky (where y = x + 1)
+	 * NETMASK0=netmask1
+	 * NETMASKx=netmasky (where y = x + 1)
 	 *
 	 * GATEWAY=ipaddr1
-	 * GATEWAY_x=ipaddry (where y = x + 1)
+	 * GATEWAYx=ipaddry (where y = x + 1)
 	 *
 	 * DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc)
 	 *
@@ -1271,7 +1265,7 @@
 	 */
 
 	snprintf(if_file, sizeof(if_file), "%s%s%s", KVP_CONFIG_LOC,
-		"hyperv/ifcfg-", if_name);
+		"/ifcfg-", if_name);
 
 	file = fopen(if_file, "w");
 
@@ -1294,12 +1288,12 @@
 	if (error)
 		goto setval_error;
 
-	error = kvp_write_file(file, "IF_NAME", "", if_name);
+	error = kvp_write_file(file, "DEVICE", "", if_name);
 	if (error)
 		goto setval_error;
 
 	if (new_val->dhcp_enabled) {
-		error = kvp_write_file(file, "DHCP", "", "yes");
+		error = kvp_write_file(file, "BOOTPROTO", "", "dhcp");
 		if (error)
 			goto setval_error;
 
@@ -1307,6 +1301,11 @@
 		 * We are done!.
 		 */
 		goto setval_done;
+
+	} else {
+		error = kvp_write_file(file, "BOOTPROTO", "", "none");
+		if (error)
+			goto setval_error;
 	}
 
 	/*
diff --git a/tools/hv/hv_set_ifconfig.sh b/tools/hv/hv_set_ifconfig.sh
index 3e9427e..735aafd 100755
--- a/tools/hv/hv_set_ifconfig.sh
+++ b/tools/hv/hv_set_ifconfig.sh
@@ -20,18 +20,19 @@
 # Here is the format of the ip configuration file:
 #
 # HWADDR=macaddr
-# IF_NAME=interface name
-# DHCP=yes (This is optional; if yes, DHCP is configured)
+# DEVICE=interface name
+# BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured
+#                       or "none" if no boot-time protocol should be used)
 #
-# IPADDR=ipaddr1
-# IPADDR_1=ipaddr2
-# IPADDR_x=ipaddry (where y = x + 1)
+# IPADDR0=ipaddr1
+# IPADDR1=ipaddr2
+# IPADDRx=ipaddry (where y = x + 1)
 #
-# NETMASK=netmask1
-# NETMASK_x=netmasky (where y = x + 1)
+# NETMASK0=netmask1
+# NETMASKx=netmasky (where y = x + 1)
 #
 # GATEWAY=ipaddr1
-# GATEWAY_x=ipaddry (where y = x + 1)
+# GATEWAYx=ipaddry (where y = x + 1)
 #
 # DNSx=ipaddrx (where first DNS address is tagged as DNS1 etc)
 #
@@ -53,11 +54,6 @@
 echo "PEERDNS=yes" >> $1
 echo "ONBOOT=yes" >> $1
 
-dhcp=$(grep "DHCP" $1 2>/dev/null)
-if [ "$dhcp" != "" ];
-then
-echo "BOOTPROTO=dhcp" >> $1;
-fi
 
 cp $1 /etc/sysconfig/network-scripts/
 
@@ -65,4 +61,4 @@
 interface=$(echo $1 | awk -F - '{ print $2 }')
 
 /sbin/ifdown $interface 2>/dev/null
-/sbin/ifup $interfac 2>/dev/null
+/sbin/ifup $interface 2>/dev/null