Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (32 commits)
  ACPI: i2c-scmi: don't use acpi_device_uid()
  ACPI: simplify building device HID/CID list
  ACPI: remove acpi_device_uid() and related stuff
  ACPI: remove acpi_device.flags.hardware_id
  ACPI: remove acpi_device.flags.compatible_ids
  ACPI: maintain a single list of _HID and _CID IDs
  ACPI: make sure every acpi_device has an ID
  ACPI: use acpi_device_hid() when possible
  ACPI: fix synthetic HID for \_SB_
  ACPI: handle re-enumeration, when acpi_devices might already exist
  ACPI: factor out device type and status checking
  ACPI: add acpi_bus_get_status_handle()
  ACPI: use acpi_walk_namespace() to enumerate devices
  ACPI: identify device tree root by null parent pointer, not ACPI_BUS_TYPE
  ACPI: enumerate namespace before adding functional fixed hardware devices
  ACPI: convert acpi_bus_scan() to operate on an acpi_handle
  ACPI: add acpi_bus_get_parent() and remove "parent" arguments
  ACPI: remove unnecessary argument checking
  ACPI: remove redundant "type" arguments
  ACPI: remove acpi_device_set_context() "type" argument
  ...
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index e17e3c3..ac34c0d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -83,6 +83,8 @@
 	ldr	r0, [sp]
 	strex	r1, r2, [sp]			@ clear the exclusive monitor
 	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#else
+	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
 #endif
 	.endm
 
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 6cdbf7e..9d83d3b 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -258,8 +258,6 @@
 static inline u32 vio_dring_avail(struct vio_dring_state *dr,
 				  unsigned int ring_size)
 {
-	MAYBE_BUILD_BUG_ON(!is_power_of_2(ring_size));
-
 	return (dr->pending -
 		((dr->prod - dr->cons) & (ring_size - 1)));
 }
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
new file mode 100644
index 0000000..f67ae28
--- /dev/null
+++ b/drivers/net/can/at91_can.c
@@ -0,0 +1,1186 @@
+/*
+ * at91_can.c - CAN network driver for AT91 SoC CAN controller
+ *
+ * (C) 2007 by Hans J. Koch <hjk@linutronix.de>
+ * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
+ *
+ * This software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2 as distributed in the 'COPYING'
+ * file from the main directory of the linux kernel source.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ *
+ * Your platform definition file should specify something like:
+ *
+ * static struct at91_can_data ek_can_data = {
+ *	transceiver_switch = sam9263ek_transceiver_switch,
+ * };
+ *
+ * at91_add_device_can(&ek_can_data);
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+
+#include <mach/board.h>
+
+#define DRV_NAME		"at91_can"
+#define AT91_NAPI_WEIGHT	12
+
+/*
+ * RX/TX Mailbox split
+ * don't dare to touch
+ */
+#define AT91_MB_RX_NUM		12
+#define AT91_MB_TX_SHIFT	2
+
+#define AT91_MB_RX_FIRST	0
+#define AT91_MB_RX_LAST		(AT91_MB_RX_FIRST + AT91_MB_RX_NUM - 1)
+
+#define AT91_MB_RX_MASK(i)	((1 << (i)) - 1)
+#define AT91_MB_RX_SPLIT	8
+#define AT91_MB_RX_LOW_LAST	(AT91_MB_RX_SPLIT - 1)
+#define AT91_MB_RX_LOW_MASK	(AT91_MB_RX_MASK(AT91_MB_RX_SPLIT))
+
+#define AT91_MB_TX_NUM		(1 << AT91_MB_TX_SHIFT)
+#define AT91_MB_TX_FIRST	(AT91_MB_RX_LAST + 1)
+#define AT91_MB_TX_LAST		(AT91_MB_TX_FIRST + AT91_MB_TX_NUM - 1)
+
+#define AT91_NEXT_PRIO_SHIFT	(AT91_MB_TX_SHIFT)
+#define AT91_NEXT_PRIO_MASK	(0xf << AT91_MB_TX_SHIFT)
+#define AT91_NEXT_MB_MASK	(AT91_MB_TX_NUM - 1)
+#define AT91_NEXT_MASK		((AT91_MB_TX_NUM - 1) | AT91_NEXT_PRIO_MASK)
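+
+/*
+ * With the values above: RX uses mailboxes 0..11 (lower group 0..7,
+ * upper group 8..11), TX uses mailboxes 12..15.
+ */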
+
+/* Common registers */
+enum at91_reg {
+	AT91_MR		= 0x000,
+	AT91_IER	= 0x004,
+	AT91_IDR	= 0x008,
+	AT91_IMR	= 0x00C,
+	AT91_SR		= 0x010,
+	AT91_BR		= 0x014,
+	AT91_TIM	= 0x018,
+	AT91_TIMESTP	= 0x01C,
+	AT91_ECR	= 0x020,
+	AT91_TCR	= 0x024,
+	AT91_ACR	= 0x028,
+};
+
+/* Mailbox registers (0 <= i <= 15) */
+#define AT91_MMR(i)		(enum at91_reg)(0x200 + ((i) * 0x20))
+#define AT91_MAM(i)		(enum at91_reg)(0x204 + ((i) * 0x20))
+#define AT91_MID(i)		(enum at91_reg)(0x208 + ((i) * 0x20))
+#define AT91_MFID(i)		(enum at91_reg)(0x20C + ((i) * 0x20))
+#define AT91_MSR(i)		(enum at91_reg)(0x210 + ((i) * 0x20))
+#define AT91_MDL(i)		(enum at91_reg)(0x214 + ((i) * 0x20))
+#define AT91_MDH(i)		(enum at91_reg)(0x218 + ((i) * 0x20))
+#define AT91_MCR(i)		(enum at91_reg)(0x21C + ((i) * 0x20))
+
+/* Register bits */
+#define AT91_MR_CANEN		BIT(0)
+#define AT91_MR_LPM		BIT(1)
+#define AT91_MR_ABM		BIT(2)
+#define AT91_MR_OVL		BIT(3)
+#define AT91_MR_TEOF		BIT(4)
+#define AT91_MR_TTM		BIT(5)
+#define AT91_MR_TIMFRZ		BIT(6)
+#define AT91_MR_DRPT		BIT(7)
+
+#define AT91_SR_RBSY		BIT(29)
+
+#define AT91_MMR_PRIO_SHIFT	(16)
+
+#define AT91_MID_MIDE		BIT(29)
+
+#define AT91_MSR_MRTR		BIT(20)
+#define AT91_MSR_MABT		BIT(22)
+#define AT91_MSR_MRDY		BIT(23)
+#define AT91_MSR_MMI		BIT(24)
+
+#define AT91_MCR_MRTR		BIT(20)
+#define AT91_MCR_MTCR		BIT(23)
+
+/* Mailbox Modes */
+enum at91_mb_mode {
+	AT91_MB_MODE_DISABLED	= 0,
+	AT91_MB_MODE_RX		= 1,
+	AT91_MB_MODE_RX_OVRWR	= 2,
+	AT91_MB_MODE_TX		= 3,
+	AT91_MB_MODE_CONSUMER	= 4,
+	AT91_MB_MODE_PRODUCER	= 5,
+};
+
+/* Interrupt mask bits */
+#define AT91_IRQ_MB_RX		((1 << (AT91_MB_RX_LAST + 1)) \
+				 - (1 << AT91_MB_RX_FIRST))
+#define AT91_IRQ_MB_TX		((1 << (AT91_MB_TX_LAST + 1)) \
+				 - (1 << AT91_MB_TX_FIRST))
+#define AT91_IRQ_MB_ALL		(AT91_IRQ_MB_RX | AT91_IRQ_MB_TX)
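+
+/*
+ * With the mailbox layout above this evaluates to
+ * AT91_IRQ_MB_RX == 0x00000fff and AT91_IRQ_MB_TX == 0x0000f000.
+ */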
+
+#define AT91_IRQ_ERRA		(1 << 16)
+#define AT91_IRQ_WARN		(1 << 17)
+#define AT91_IRQ_ERRP		(1 << 18)
+#define AT91_IRQ_BOFF		(1 << 19)
+#define AT91_IRQ_SLEEP		(1 << 20)
+#define AT91_IRQ_WAKEUP		(1 << 21)
+#define AT91_IRQ_TOVF		(1 << 22)
+#define AT91_IRQ_TSTP		(1 << 23)
+#define AT91_IRQ_CERR		(1 << 24)
+#define AT91_IRQ_SERR		(1 << 25)
+#define AT91_IRQ_AERR		(1 << 26)
+#define AT91_IRQ_FERR		(1 << 27)
+#define AT91_IRQ_BERR		(1 << 28)
+
+#define AT91_IRQ_ERR_ALL	(0x1fff0000)
+#define AT91_IRQ_ERR_FRAME	(AT91_IRQ_CERR | AT91_IRQ_SERR | \
+				 AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR)
+#define AT91_IRQ_ERR_LINE	(AT91_IRQ_ERRA | AT91_IRQ_WARN | \
+				 AT91_IRQ_ERRP | AT91_IRQ_BOFF)
+
+#define AT91_IRQ_ALL		(0x1fffffff)
+
+struct at91_priv {
+	struct can_priv		can;	   /* must be the first member! */
+	struct net_device	*dev;
+	struct napi_struct	napi;
+
+	void __iomem		*reg_base;
+
+	u32			reg_sr;
+	unsigned int		tx_next;
+	unsigned int		tx_echo;
+	unsigned int		rx_next;
+
+	struct clk		*clk;
+	struct at91_can_data	*pdata;
+};
+
+static struct can_bittiming_const at91_bittiming_const = {
+	.tseg1_min	= 4,
+	.tseg1_max	= 16,
+	.tseg2_min	= 2,
+	.tseg2_max	= 8,
+	.sjw_max	= 4,
+	.brp_min	= 2,
+	.brp_max	= 128,
+	.brp_inc	= 1,
+};
+
+static inline int get_tx_next_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_next & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline int get_tx_next_prio(const struct at91_priv *priv)
+{
+	return (priv->tx_next >> AT91_NEXT_PRIO_SHIFT) & 0xf;
+}
+
+static inline int get_tx_echo_mb(const struct at91_priv *priv)
+{
+	return (priv->tx_echo & AT91_NEXT_MB_MASK) + AT91_MB_TX_FIRST;
+}
+
+static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg)
+{
+	return readl(priv->reg_base + reg);
+}
+
+static inline void at91_write(const struct at91_priv *priv, enum at91_reg reg,
+		u32 value)
+{
+	writel(value, priv->reg_base + reg);
+}
+
+static inline void set_mb_mode_prio(const struct at91_priv *priv,
+		unsigned int mb, enum at91_mb_mode mode, int prio)
+{
+	at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16));
+}
+
+static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
+		enum at91_mb_mode mode)
+{
+	set_mb_mode_prio(priv, mb, mode, 0);
+}
+
+static struct sk_buff *alloc_can_skb(struct net_device *dev,
+		struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
+	if (unlikely(!skb))
+		return NULL;
+
+	skb->protocol = htons(ETH_P_CAN);
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	*cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
+
+	return skb;
+}
+
+static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
+		struct can_frame **cf)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, cf);
+	if (unlikely(!skb))
+		return NULL;
+
+	memset(*cf, 0, sizeof(struct can_frame));
+	(*cf)->can_id = CAN_ERR_FLAG;
+	(*cf)->can_dlc = CAN_ERR_DLC;
+
+	return skb;
+}
+
+/*
+ * Switch the transceiver on or off
+ */
+static void at91_transceiver_switch(const struct at91_priv *priv, int on)
+{
+	if (priv->pdata && priv->pdata->transceiver_switch)
+		priv->pdata->transceiver_switch(on);
+}
+
+static void at91_setup_mailboxes(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	unsigned int i;
+
+	/*
+	 * The first 12 mailboxes are used as a reception FIFO. The
+	 * last mailbox is configured with overwrite option. The
+	 * overwrite flag indicates a FIFO overflow.
+	 */
+	for (i = AT91_MB_RX_FIRST; i < AT91_MB_RX_LAST; i++)
+		set_mb_mode(priv, i, AT91_MB_MODE_RX);
+	set_mb_mode(priv, AT91_MB_RX_LAST, AT91_MB_MODE_RX_OVRWR);
+
+	/* The last 4 mailboxes are used for transmitting. */
+	for (i = AT91_MB_TX_FIRST; i <= AT91_MB_TX_LAST; i++)
+		set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0);
+
+	/* Reset tx and rx helper pointers */
+	priv->tx_next = priv->tx_echo = priv->rx_next = 0;
+}
+
+static int at91_set_bittiming(struct net_device *dev)
+{
+	const struct at91_priv *priv = netdev_priv(dev);
+	const struct can_bittiming *bt = &priv->can.bittiming;
+	u32 reg_br;
+
+	reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) << 24) |
+		((bt->brp - 1) << 16) |	((bt->sjw - 1) << 12) |
+		((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) |
+		((bt->phase_seg2 - 1) << 0);
+
+	dev_info(dev->dev.parent, "writing AT91_BR: 0x%08x\n", reg_br);
+
+	at91_write(priv, AT91_BR, reg_br);
+
+	return 0;
+}
+
+static void at91_chip_start(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_mr, reg_ier;
+
+	/* disable interrupts */
+	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+	/* disable chip */
+	reg_mr = at91_read(priv, AT91_MR);
+	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+	at91_setup_mailboxes(dev);
+	at91_transceiver_switch(priv, 1);
+
+	/* enable chip */
+	at91_write(priv, AT91_MR, AT91_MR_CANEN);
+
+	priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+	/* Enable interrupts */
+	reg_ier = AT91_IRQ_MB_RX | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME;
+	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+	at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_chip_stop(struct net_device *dev, enum can_state state)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_mr;
+
+	/* disable interrupts */
+	at91_write(priv, AT91_IDR, AT91_IRQ_ALL);
+
+	reg_mr = at91_read(priv, AT91_MR);
+	at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN);
+
+	at91_transceiver_switch(priv, 0);
+	priv->can.state = state;
+}
+
+/*
+ * theory of operation:
+ *
+ * According to the datasheet priority 0 is the highest priority, 15
+ * is the lowest. If two mailboxes have the same priority level the
+ * message of the mailbox with the lowest number is sent first.
+ *
+ * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then
+ * the next mailbox with prio 0, and so on, until all mailboxes are
+ * used. Then we start from the beginning with mailbox
+ * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1
+ * prio 1. When we reach the last mailbox with prio 15, we have to
+ * stop sending, wait for all messages to be delivered, then start
+ * again with mailbox AT91_MB_TX_FIRST prio 0.
+ *
+ * We use priv->tx_next as a counter for the next transmission
+ * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits
+ * encode the mailbox number, the upper 4 bits the mailbox priority:
+ *
+ * priv->tx_next = (prio << AT91_NEXT_PRIO_SHIFT) |
+ *                 (mb - AT91_MB_TX_FIRST);
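+ *
+ * For example, with AT91_MB_TX_SHIFT == 2, tx_next == 6 (0b0110)
+ * encodes prio = 6 >> 2 = 1 and mailbox = (6 & 3) + AT91_MB_TX_FIRST
+ * = 14; this is what get_tx_next_prio() and get_tx_next_mb() return.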
+ *
+ */
+static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	unsigned int mb, prio;
+	u32 reg_mid, reg_mcr;
+
+	mb = get_tx_next_mb(priv);
+	prio = get_tx_next_prio(priv);
+
+	if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) {
+		netif_stop_queue(dev);
+
+		dev_err(dev->dev.parent,
+			"BUG! TX buffer full when queue awake!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (cf->can_id & CAN_EFF_FLAG)
+		reg_mid = (cf->can_id & CAN_EFF_MASK) | AT91_MID_MIDE;
+	else
+		reg_mid = (cf->can_id & CAN_SFF_MASK) << 18;
+
+	reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? AT91_MCR_MRTR : 0) |
+		(cf->can_dlc << 16) | AT91_MCR_MTCR;
+
+	/* disable MB while writing ID (see datasheet) */
+	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
+	at91_write(priv, AT91_MID(mb), reg_mid);
+	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);
+
+	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
+	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));
+
+	/* This triggers transmission */
+	at91_write(priv, AT91_MCR(mb), reg_mcr);
+
+	stats->tx_bytes += cf->can_dlc;
+	dev->trans_start = jiffies;
+
+	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+	can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
+
+	/*
+	 * we have to stop the queue and deliver all messages in case
+	 * of a prio+mb counter wrap around. This is the case if the
+	 * tx_next prio and mailbox both equal 0.
+	 *
+	 * also stop the queue if next buffer is still in use
+	 * (== not ready)
+	 */
+	priv->tx_next++;
+	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
+	      AT91_MSR_MRDY) ||
+	    (priv->tx_next & AT91_NEXT_MASK) == 0)
+		netif_stop_queue(dev);
+
+	/* Enable interrupt for this mailbox */
+	at91_write(priv, AT91_IER, 1 << mb);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * at91_activate_rx_low - activate lower rx mailboxes
+ * @priv: at91 context
+ *
+ * Reenables the lower mailboxes for reception of new CAN messages
+ */
+static inline void at91_activate_rx_low(const struct at91_priv *priv)
+{
+	u32 mask = AT91_MB_RX_LOW_MASK;
+	at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_activate_rx_mb - reactivate a single rx mailbox
+ * @priv: at91 context
+ * @mb: mailbox to reactivate
+ *
+ * Reenables given mailbox for reception of new CAN messages
+ */
+static inline void at91_activate_rx_mb(const struct at91_priv *priv,
+		unsigned int mb)
+{
+	u32 mask = 1 << mb;
+	at91_write(priv, AT91_TCR, mask);
+}
+
+/**
+ * at91_rx_overflow_err - send error frame due to rx overflow
+ * @dev: net device
+ */
+static void at91_rx_overflow_err(struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	dev_dbg(dev->dev.parent, "RX buffer overflow\n");
+	stats->rx_over_errors++;
+	stats->rx_errors++;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return;
+
+	cf->can_id |= CAN_ERR_CRTL;
+	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+	netif_receive_skb(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
+ * @dev: net device
+ * @mb: mailbox number to read from
+ * @cf: can frame where to store message
+ *
+ * Reads a CAN message from the given mailbox and stores the data in
+ * the given can frame. "mb" and "cf" must be valid.
+ */
+static void at91_read_mb(struct net_device *dev, unsigned int mb,
+		struct can_frame *cf)
+{
+	const struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_msr, reg_mid;
+
+	reg_mid = at91_read(priv, AT91_MID(mb));
+	if (reg_mid & AT91_MID_MIDE)
+		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;
+
+	reg_msr = at91_read(priv, AT91_MSR(mb));
+	if (reg_msr & AT91_MSR_MRTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	cf->can_dlc = min_t(__u8, (reg_msr >> 16) & 0xf, 8);
+
+	*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
+	*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
+
+	if (unlikely(mb == AT91_MB_RX_LAST && reg_msr & AT91_MSR_MMI))
+		at91_rx_overflow_err(dev);
+}
+
+/**
+ * at91_read_msg - read CAN message from mailbox
+ * @dev: net device
+ * @mb: mailbox to read from
+ *
+ * Reads a CAN message from the given mailbox, puts it into the linux
+ * network RX queue and does all housekeeping chores (stats, ...)
+ */
+static void at91_read_msg(struct net_device *dev, unsigned int mb)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct can_frame *cf;
+	struct sk_buff *skb;
+
+	skb = alloc_can_skb(dev, &cf);
+	if (unlikely(!skb)) {
+		stats->rx_dropped++;
+		return;
+	}
+
+	at91_read_mb(dev, mb, cf);
+	netif_receive_skb(skb);
+
+	stats->rx_packets++;
+	stats->rx_bytes += cf->can_dlc;
+}
+
+/**
+ * at91_poll_rx - read multiple CAN messages from mailboxes
+ * @dev: net device
+ * @quota: max number of packets we're allowed to receive
+ *
+ * Theory of Operation:
+ *
+ * 12 of the 16 mailboxes on the chip are reserved for RX. We split
+ * them into 2 groups: the lower group holds 8 mailboxes, the upper 4.
+ *
+ * Like it or not, the chip always saves a received CAN message
+ * into the first free mailbox it finds (starting with the
+ * lowest). This makes it very difficult to read the messages in the
+ * right order from the chip. This is how we work around that problem:
+ *
+ * The first message goes into mb nr. 0 and issues an interrupt. All
+ * rx ints are disabled in the interrupt handler and a napi poll is
+ * scheduled. We read the mailbox, but do _not_ reenable the mb (to
+ * receive another message).
+ *
+ *    lower mbxs      upper
+ *   ______^______    __^__
+ *  /             \  /     \
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ * |x|x|x|x|x|x|x|x|| | | | |
+ * +-+-+-+-+-+-+-+-++-+-+-+-+
+ *  0 0 0 0 0 0 0 0  0 0 1 1  \ mail
+ *  0 1 2 3 4 5 6 7  8 9 0 1  / box
+ *
+ * The variable priv->rx_next points to the next mailbox to read a
+ * message from. As long as we're in the lower mailboxes we just read
+ * the mailbox but do not reenable it.
+ *
+ * With completion of the last of the lower mailboxes, we reenable the
+ * whole first group, but continue to look for filled mailboxes in the
+ * upper mailboxes. Think of the second group as overflow mailboxes
+ * that take CAN messages when the lower group is full. While in the
+ * upper group we reenable each mailbox right after reading it, giving
+ * the chip more room to store messages.
+ *
+ * After finishing we look again in the lower group if we still have
+ * quota left.
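+ *
+ * For example: 9 frames arriving back-to-back land in mailboxes
+ * 0..8. We read mailboxes 0..7 without reenabling them, reenable the
+ * whole lower group after mailbox 7, then read mailbox 8 from the
+ * upper group and reenable it immediately.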
+ *
+ */
+static int at91_poll_rx(struct net_device *dev, int quota)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_sr = at91_read(priv, AT91_SR);
+	const unsigned long *addr = (unsigned long *)&reg_sr;
+	unsigned int mb;
+	int received = 0;
+
+	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+	    reg_sr & AT91_MB_RX_LOW_MASK)
+		dev_info(dev->dev.parent,
+			 "order of incoming frames cannot be guaranteed\n");
+
+ again:
+	for (mb = find_next_bit(addr, AT91_MB_RX_NUM, priv->rx_next);
+	     mb < AT91_MB_RX_NUM && quota > 0;
+	     reg_sr = at91_read(priv, AT91_SR),
+	     mb = find_next_bit(addr, AT91_MB_RX_NUM, ++priv->rx_next)) {
+		at91_read_msg(dev, mb);
+
+		/* reactivate mailboxes */
+		if (mb == AT91_MB_RX_LOW_LAST)
+			/* all lower mailboxes; we just finished the last one */
+			at91_activate_rx_low(priv);
+		else if (mb > AT91_MB_RX_LOW_LAST)
+			/* only the mailbox we read */
+			at91_activate_rx_mb(priv, mb);
+
+		received++;
+		quota--;
+	}
+
+	/* upper group completed, look again in lower */
+	if (priv->rx_next > AT91_MB_RX_LOW_LAST &&
+	    quota > 0 && mb >= AT91_MB_RX_NUM) {
+		priv->rx_next = 0;
+		goto again;
+	}
+
+	return received;
+}
+
+static void at91_poll_err_frame(struct net_device *dev,
+		struct can_frame *cf, u32 reg_sr)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+
+	/* CRC error */
+	if (reg_sr & AT91_IRQ_CERR) {
+		dev_dbg(dev->dev.parent, "CERR irq\n");
+		dev->stats.rx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+	}
+
+	/* Stuffing Error */
+	if (reg_sr & AT91_IRQ_SERR) {
+		dev_dbg(dev->dev.parent, "SERR irq\n");
+		dev->stats.rx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+	}
+
+	/* Acknowledgement Error */
+	if (reg_sr & AT91_IRQ_AERR) {
+		dev_dbg(dev->dev.parent, "AERR irq\n");
+		dev->stats.tx_errors++;
+		cf->can_id |= CAN_ERR_ACK;
+	}
+
+	/* Form error */
+	if (reg_sr & AT91_IRQ_FERR) {
+		dev_dbg(dev->dev.parent, "FERR irq\n");
+		dev->stats.rx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+	}
+
+	/* Bit Error */
+	if (reg_sr & AT91_IRQ_BERR) {
+		dev_dbg(dev->dev.parent, "BERR irq\n");
+		dev->stats.tx_errors++;
+		priv->can.can_stats.bus_error++;
+		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+		cf->data[2] |= CAN_ERR_PROT_BIT;
+	}
+}
+
+static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
+{
+	struct sk_buff *skb;
+	struct can_frame *cf;
+
+	if (quota == 0)
+		return 0;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return 0;
+
+	at91_poll_err_frame(dev, cf, reg_sr);
+	netif_receive_skb(skb);
+
+	dev->last_rx = jiffies;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += cf->can_dlc;
+
+	return 1;
+}
+
+static int at91_poll(struct napi_struct *napi, int quota)
+{
+	struct net_device *dev = napi->dev;
+	const struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_sr = at91_read(priv, AT91_SR);
+	int work_done = 0;
+
+	if (reg_sr & AT91_IRQ_MB_RX)
+		work_done += at91_poll_rx(dev, quota - work_done);
+
+	/*
+	 * The error bits are clear on read,
+	 * so use saved value from irq handler.
+	 */
+	reg_sr |= priv->reg_sr;
+	if (reg_sr & AT91_IRQ_ERR_FRAME)
+		work_done += at91_poll_err(dev, quota - work_done, reg_sr);
+
+	if (work_done < quota) {
+		/* enable IRQs for frame errors and all mailboxes >= rx_next */
+		u32 reg_ier = AT91_IRQ_ERR_FRAME;
+		reg_ier |= AT91_IRQ_MB_RX & ~AT91_MB_RX_MASK(priv->rx_next);
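+		/* e.g. rx_next == 8: ~AT91_MB_RX_MASK(8) clears bits 0..7,
+		 * so only the upper mailboxes 8..11 raise RX IRQs again */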
+
+		napi_complete(napi);
+		at91_write(priv, AT91_IER, reg_ier);
+	}
+
+	return work_done;
+}
+
+/*
+ * theory of operation:
+ *
+ * priv->tx_echo holds the number of the oldest can_frame put for
+ * transmission into the hardware, but not yet ACKed by the CAN tx
+ * complete IRQ.
+ *
+ * We iterate from priv->tx_echo to priv->tx_next; if the packet has
+ * been transmitted, we echo it back to the CAN framework. If we
+ * discover a not yet transmitted packet, we stop looking for more.
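+ *
+ * For example, with tx_echo == 4 and tx_next == 6 we check mailboxes
+ * AT91_MB_TX_FIRST + 0 and AT91_MB_TX_FIRST + 1 for completion.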
+ *
+ */
+static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_msr;
+	unsigned int mb;
+
+	/* masking of reg_sr not needed, already done by at91_irq */
+
+	for (/* nothing */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
+		mb = get_tx_echo_mb(priv);
+
+		/* no event in mailbox? */
+		if (!(reg_sr & (1 << mb)))
+			break;
+
+		/* Disable irq for this TX mailbox */
+		at91_write(priv, AT91_IDR, 1 << mb);
+
+		/*
+		 * only echo if mailbox signals us a transfer
+		 * complete (MSR_MRDY). Otherwise it's a transfer
+		 * abort. "can_bus_off()" takes care of the skbs
+		 * parked in the echo queue.
+		 */
+		reg_msr = at91_read(priv, AT91_MSR(mb));
+		if (likely(reg_msr & AT91_MSR_MRDY &&
+			   ~reg_msr & AT91_MSR_MABT)) {
+			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
+			can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
+			dev->stats.tx_packets++;
+		}
+	}
+
+	/*
+	 * restart the queue if we don't have a wrap around, but also
+	 * restart if we get a TX int for the last can frame directly
+	 * before a wrap around.
+	 */
+	if ((priv->tx_next & AT91_NEXT_MASK) != 0 ||
+	    (priv->tx_echo & AT91_NEXT_MASK) == 0)
+		netif_wake_queue(dev);
+}
+
+static void at91_irq_err_state(struct net_device *dev,
+		struct can_frame *cf, enum can_state new_state)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	u32 reg_idr, reg_ier, reg_ecr;
+	u8 tec, rec;
+
+	reg_ecr = at91_read(priv, AT91_ECR);
+	rec = reg_ecr & 0xff;
+	tec = reg_ecr >> 16;
+
+	switch (priv->can.state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/*
+		 * from: ERROR_ACTIVE
+		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+		 * =>  : there was a warning int
+		 */
+		if (new_state >= CAN_STATE_ERROR_WARNING &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			dev_dbg(dev->dev.parent, "Error Warning IRQ\n");
+			priv->can.can_stats.error_warning++;
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (tec > rec) ?
+				CAN_ERR_CRTL_TX_WARNING :
+				CAN_ERR_CRTL_RX_WARNING;
+		}
+	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
+		/*
+		 * from: ERROR_ACTIVE, ERROR_WARNING
+		 * to  : ERROR_PASSIVE, BUS_OFF
+		 * =>  : error passive int
+		 */
+		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			dev_dbg(dev->dev.parent, "Error Passive IRQ\n");
+			priv->can.can_stats.error_passive++;
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (tec > rec) ?
+				CAN_ERR_CRTL_TX_PASSIVE :
+				CAN_ERR_CRTL_RX_PASSIVE;
+		}
+		break;
+	case CAN_STATE_BUS_OFF:
+		/*
+		 * from: BUS_OFF
+		 * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
+		 */
+		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
+			cf->can_id |= CAN_ERR_RESTARTED;
+
+			dev_dbg(dev->dev.parent, "restarted\n");
+			priv->can.can_stats.restarts++;
+
+			netif_carrier_on(dev);
+			netif_wake_queue(dev);
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* process state changes depending on the new state */
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/*
+		 * Actually we want to enable AT91_IRQ_WARN here, but
+		 * it screws up the system under certain
+		 * circumstances, so just enable AT91_IRQ_ERRP, hence
+		 * the "fallthrough"
+		 */
+		dev_dbg(dev->dev.parent, "Error Active\n");
+		cf->can_id |= CAN_ERR_PROT;
+		cf->data[2] = CAN_ERR_PROT_ACTIVE;
+	case CAN_STATE_ERROR_WARNING:	/* fallthrough */
+		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF;
+		reg_ier = AT91_IRQ_ERRP;
+		break;
+	case CAN_STATE_ERROR_PASSIVE:
+		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP;
+		reg_ier = AT91_IRQ_BOFF;
+		break;
+	case CAN_STATE_BUS_OFF:
+		reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP |
+			AT91_IRQ_WARN | AT91_IRQ_BOFF;
+		reg_ier = 0;
+
+		cf->can_id |= CAN_ERR_BUSOFF;
+
+		dev_dbg(dev->dev.parent, "bus-off\n");
+		netif_carrier_off(dev);
+		priv->can.can_stats.bus_off++;
+
+		/* turn off chip, if restart is disabled */
+		if (!priv->can.restart_ms) {
+			at91_chip_stop(dev, CAN_STATE_BUS_OFF);
+			return;
+		}
+		break;
+	default:
+		break;
+	}
+
+	at91_write(priv, AT91_IDR, reg_idr);
+	at91_write(priv, AT91_IER, reg_ier);
+}
+
+static void at91_irq_err(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	struct sk_buff *skb;
+	struct can_frame *cf;
+	enum can_state new_state;
+	u32 reg_sr;
+
+	reg_sr = at91_read(priv, AT91_SR);
+
+	/* we need to look at the unmasked reg_sr */
+	if (unlikely(reg_sr & AT91_IRQ_BOFF))
+		new_state = CAN_STATE_BUS_OFF;
+	else if (unlikely(reg_sr & AT91_IRQ_ERRP))
+		new_state = CAN_STATE_ERROR_PASSIVE;
+	else if (unlikely(reg_sr & AT91_IRQ_WARN))
+		new_state = CAN_STATE_ERROR_WARNING;
+	else if (likely(reg_sr & AT91_IRQ_ERRA))
+		new_state = CAN_STATE_ERROR_ACTIVE;
+	else {
+		dev_err(dev->dev.parent, "BUG! hardware in undefined state\n");
+		return;
+	}
+
+	/* state hasn't changed */
+	if (likely(new_state == priv->can.state))
+		return;
+
+	skb = alloc_can_err_skb(dev, &cf);
+	if (unlikely(!skb))
+		return;
+
+	at91_irq_err_state(dev, cf, new_state);
+	netif_rx(skb);
+
+	dev->last_rx = jiffies;
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += cf->can_dlc;
+
+	priv->can.state = new_state;
+}
+
+/*
+ * interrupt handler
+ */
+static irqreturn_t at91_irq(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct at91_priv *priv = netdev_priv(dev);
+	irqreturn_t handled = IRQ_NONE;
+	u32 reg_sr, reg_imr;
+
+	reg_sr = at91_read(priv, AT91_SR);
+	reg_imr = at91_read(priv, AT91_IMR);
+
+	/* Ignore masked interrupts */
+	reg_sr &= reg_imr;
+	if (!reg_sr)
+		goto exit;
+
+	handled = IRQ_HANDLED;
+
+	/* Receive or error interrupt? -> napi */
+	if (reg_sr & (AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME)) {
+		/*
+		 * The error bits are clear on read,
+		 * save for later use.
+		 */
+		priv->reg_sr = reg_sr;
+		at91_write(priv, AT91_IDR,
+			   AT91_IRQ_MB_RX | AT91_IRQ_ERR_FRAME);
+		napi_schedule(&priv->napi);
+	}
+
+	/* Transmission complete interrupt */
+	if (reg_sr & AT91_IRQ_MB_TX)
+		at91_irq_tx(dev, reg_sr);
+
+	at91_irq_err(dev);
+
+ exit:
+	return handled;
+}
+
+static int at91_open(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+	int err;
+
+	clk_enable(priv->clk);
+
+	/* check or determine and set bittime */
+	err = open_candev(dev);
+	if (err)
+		goto out;
+
+	/* register interrupt handler */
+	if (request_irq(dev->irq, at91_irq, IRQF_SHARED,
+			dev->name, dev)) {
+		err = -EAGAIN;
+		goto out_close;
+	}
+
+	/* start chip and queuing */
+	at91_chip_start(dev);
+	napi_enable(&priv->napi);
+	netif_start_queue(dev);
+
+	return 0;
+
+ out_close:
+	close_candev(dev);
+ out:
+	clk_disable(priv->clk);
+
+	return err;
+}
+
+/*
+ * stop CAN bus activity
+ */
+static int at91_close(struct net_device *dev)
+{
+	struct at91_priv *priv = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	at91_chip_stop(dev, CAN_STATE_STOPPED);
+
+	free_irq(dev->irq, dev);
+	clk_disable(priv->clk);
+
+	close_candev(dev);
+
+	return 0;
+}
+
+static int at91_set_mode(struct net_device *dev, enum can_mode mode)
+{
+	switch (mode) {
+	case CAN_MODE_START:
+		at91_chip_start(dev);
+		netif_wake_queue(dev);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static const struct net_device_ops at91_netdev_ops = {
+	.ndo_open	= at91_open,
+	.ndo_stop	= at91_close,
+	.ndo_start_xmit	= at91_start_xmit,
+};
+
+static int __init at91_can_probe(struct platform_device *pdev)
+{
+	struct net_device *dev;
+	struct at91_priv *priv;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *addr;
+	int err, irq;
+
+	clk = clk_get(&pdev->dev, "can_clk");
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "no clock defined\n");
+		err = -ENODEV;
+		goto exit;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (!res || !irq) {
+		err = -ENODEV;
+		goto exit_put;
+	}
+
+	if (!request_mem_region(res->start,
+				resource_size(res),
+				pdev->name)) {
+		err = -EBUSY;
+		goto exit_put;
+	}
+
+	addr = ioremap_nocache(res->start, resource_size(res));
+	if (!addr) {
+		err = -ENOMEM;
+		goto exit_release;
+	}
+
+	dev = alloc_candev(sizeof(struct at91_priv));
+	if (!dev) {
+		err = -ENOMEM;
+		goto exit_iounmap;
+	}
+
+	dev->netdev_ops	= &at91_netdev_ops;
+	dev->irq = irq;
+	dev->flags |= IFF_ECHO;
+
+	priv = netdev_priv(dev);
+	priv->can.clock.freq = clk_get_rate(clk);
+	priv->can.bittiming_const = &at91_bittiming_const;
+	priv->can.do_set_bittiming = at91_set_bittiming;
+	priv->can.do_set_mode = at91_set_mode;
+	priv->reg_base = addr;
+	priv->dev = dev;
+	priv->clk = clk;
+	priv->pdata = pdev->dev.platform_data;
+
+	netif_napi_add(dev, &priv->napi, at91_poll, AT91_NAPI_WEIGHT);
+
+	dev_set_drvdata(&pdev->dev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	err = register_candev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "registering netdev failed\n");
+		goto exit_free;
+	}
+
+	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+		 priv->reg_base, dev->irq);
+
+	return 0;
+
+ exit_free:
+	free_netdev(dev);
+ exit_iounmap:
+	iounmap(addr);
+ exit_release:
+	release_mem_region(res->start, resource_size(res));
+ exit_put:
+	clk_put(clk);
+ exit:
+	return err;
+}
+
+static int __devexit at91_can_remove(struct platform_device *pdev)
+{
+	struct net_device *dev = platform_get_drvdata(pdev);
+	struct at91_priv *priv = netdev_priv(dev);
+	struct resource *res;
+
+	unregister_netdev(dev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	free_netdev(dev);
+
+	iounmap(priv->reg_base);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	clk_put(priv->clk);
+
+	return 0;
+}
+
+static struct platform_driver at91_can_driver = {
+	.probe		= at91_can_probe,
+	.remove		= __devexit_p(at91_can_remove),
+	.driver		= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init at91_can_module_init(void)
+{
+	printk(KERN_INFO "%s netdevice driver\n", DRV_NAME);
+	return platform_driver_register(&at91_can_driver);
+}
+
+static void __exit at91_can_module_exit(void)
+{
+	platform_driver_unregister(&at91_can_driver);
+	printk(KERN_INFO "%s: driver removed\n", DRV_NAME);
+}
+
+module_init(at91_can_module_init);
+module_exit(at91_can_module_exit);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(DRV_NAME " CAN netdevice driver");
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index d465eaa..65a2d0b 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -200,6 +200,9 @@
 /** NOTE:: For DM646x the IN_VECTOR has changed */
 #define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC	BIT(EMAC_DEF_RX_CH)
 #define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC	BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT	BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT	BIT(27)
+
 
 /* CPPI bit positions */
 #define EMAC_CPPI_SOP_BIT		BIT(31)
@@ -2167,7 +2170,11 @@
 		emac_int_enable(priv);
 	}
 
-	if (unlikely(status & EMAC_DM644X_MAC_IN_VECTOR_HOST_INT)) {
+	mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+	if (priv->version == EMAC_VERSION_2)
+		mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+	if (unlikely(status & mask)) {
 		u32 ch, cause;
 		dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
 		netif_stop_queue(ndev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5c498d2..d445845 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1,4 +1,4 @@
-/* A simple network driver using virtio.
+/* A network driver using virtio.
  *
  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
  *
@@ -48,19 +48,9 @@
 	struct napi_struct napi;
 	unsigned int status;
 
-	/* The skb we couldn't send because buffers were full. */
-	struct sk_buff *last_xmit_skb;
-
-	/* If we need to free in a timer, this is it. */
-	struct timer_list xmit_free_timer;
-
 	/* Number of input buffers, and max we've ever had. */
 	unsigned int num, max;
 
-	/* For cleaning up after transmission. */
-	struct tasklet_struct tasklet;
-	bool free_in_tasklet;
-
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -78,9 +68,17 @@
 	struct page *pages;
 };
 
-static inline void *skb_vnet_hdr(struct sk_buff *skb)
+struct skb_vnet_hdr {
+	union {
+		struct virtio_net_hdr hdr;
+		struct virtio_net_hdr_mrg_rxbuf mhdr;
+	};
+	unsigned int num_sg;
+};
+
+static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
 {
-	return (struct virtio_net_hdr *)skb->cb;
+	return (struct skb_vnet_hdr *)skb->cb;
 }
 
 static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -119,17 +117,13 @@
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
-
-	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
-	 * queued, start_xmit won't be called. */
-	tasklet_schedule(&vi->tasklet);
 }
 
 static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	int err;
 	int i;
 
@@ -140,7 +134,6 @@
 	}
 
 	if (vi->mergeable_rx_bufs) {
-		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
 		unsigned int copy;
 		char *p = page_address(skb_shinfo(skb)->frags[0].page);
 
@@ -148,8 +141,8 @@
 			len = PAGE_SIZE;
 		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
 
-		memcpy(hdr, p, sizeof(*mhdr));
-		p += sizeof(*mhdr);
+		memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
+		p += sizeof(hdr->mhdr);
 
 		copy = len;
 		if (copy > skb_tailroom(skb))
@@ -164,13 +157,13 @@
 			skb_shinfo(skb)->nr_frags--;
 		} else {
 			skb_shinfo(skb)->frags[0].page_offset +=
-				sizeof(*mhdr) + copy;
+				sizeof(hdr->mhdr) + copy;
 			skb_shinfo(skb)->frags[0].size = len;
 			skb->data_len += len;
 			skb->len += len;
 		}
 
-		while (--mhdr->num_buffers) {
+		while (--hdr->mhdr.num_buffers) {
 			struct sk_buff *nskb;
 
 			i = skb_shinfo(skb)->nr_frags;
@@ -184,7 +177,7 @@
 			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
 			if (!nskb) {
 				pr_debug("%s: rx error: %d buffers missing\n",
-					 dev->name, mhdr->num_buffers);
+					 dev->name, hdr->mhdr.num_buffers);
 				dev->stats.rx_length_errors++;
 				goto drop;
 			}
@@ -205,7 +198,7 @@
 			skb->len += len;
 		}
 	} else {
-		len -= sizeof(struct virtio_net_hdr);
+		len -= sizeof(hdr->hdr);
 
 		if (len <= MAX_PACKET_LEN)
 			trim_pages(vi, skb);
@@ -223,9 +216,11 @@
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
 
-	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		pr_debug("Needs csum!\n");
-		if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
+		if (!skb_partial_csum_set(skb,
+					  hdr->hdr.csum_start,
+					  hdr->hdr.csum_offset))
 			goto frame_err;
 	}
 
@@ -233,9 +228,9 @@
 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
 
-	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		pr_debug("GSO!\n");
-		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 			break;
@@ -248,14 +243,14 @@
 		default:
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: bad gso type %u.\n",
-				       dev->name, hdr->gso_type);
+				       dev->name, hdr->hdr.gso_type);
 			goto frame_err;
 		}
 
-		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-		skb_shinfo(skb)->gso_size = hdr->gso_size;
+		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
 		if (skb_shinfo(skb)->gso_size == 0) {
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: zero gso size.\n",
@@ -285,8 +280,8 @@
 	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
-	for (;;) {
-		struct virtio_net_hdr *hdr;
+	do {
+		struct skb_vnet_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
 		if (unlikely(!skb)) {
@@ -298,7 +293,7 @@
 		skb_put(skb, MAX_PACKET_LEN);
 
 		hdr = skb_vnet_hdr(skb);
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -328,7 +323,7 @@
 			break;
 		}
 		vi->num++;
-	}
+	} while (err >= num);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -346,7 +341,7 @@
 	if (!vi->mergeable_rx_bufs)
 		return try_fill_recv_maxbufs(vi, gfp);
 
-	for (;;) {
+	do {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -380,7 +375,7 @@
 			break;
 		}
 		vi->num++;
-	}
+	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -448,42 +443,26 @@
 	return received;
 }
 
-static void free_old_xmit_skbs(struct virtnet_info *vi)
+static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
-	unsigned int len;
+	unsigned int len, tot_sgs = 0;
 
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
+		tot_sgs += skb_vnet_hdr(skb)->num_sg;
 		kfree_skb(skb);
 	}
-}
-
-/* If the virtio transport doesn't always notify us when all in-flight packets
- * are consumed, we fall back to using this function on a timer to free them. */
-static void xmit_free(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock(vi->dev);
-
-	free_old_xmit_skbs(vi);
-
-	if (!skb_queue_empty(&vi->send))
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	netif_tx_unlock(vi->dev);
+	return tot_sgs;
 }
 
 static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-	int num, err;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
-	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -491,108 +470,89 @@
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		hdr->csum_start = skb->csum_start - skb_headroom(skb);
-		hdr->csum_offset = skb->csum_offset;
+		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+		hdr->hdr.csum_offset = skb->csum_offset;
 	} else {
-		hdr->flags = 0;
-		hdr->csum_offset = hdr->csum_start = 0;
+		hdr->hdr.flags = 0;
+		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
 	}
 
 	if (skb_is_gso(skb)) {
-		hdr->hdr_len = skb_headlen(skb);
-		hdr->gso_size = skb_shinfo(skb)->gso_size;
+		hdr->hdr.hdr_len = skb_headlen(skb);
+		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
 		else
 			BUG();
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
-			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
 	} else {
-		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
-		hdr->gso_size = hdr->hdr_len = 0;
+		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
 	}
 
-	mhdr->num_buffers = 0;
+	hdr->mhdr.num_buffers = 0;
 
 	/* Encode metadata header at front. */
 	if (vi->mergeable_rx_bufs)
-		sg_set_buf(sg, mhdr, sizeof(*mhdr));
+		sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
 	else
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
-	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-
-	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
-	if (err >= 0 && !vi->free_in_tasklet)
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	return err;
-}
-
-static void xmit_tasklet(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock_bh(vi->dev);
-	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
-		vi->svq->vq_ops->kick(vi->svq);
-		vi->last_xmit_skb = NULL;
-	}
-	if (vi->free_in_tasklet)
-		free_old_xmit_skbs(vi);
-	netif_tx_unlock_bh(vi->dev);
+	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
+	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
+	int capacity;
 
 again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 
-	/* If we has a buffer left over from last time, send it now. */
-	if (unlikely(vi->last_xmit_skb) &&
-	    xmit_skb(vi, vi->last_xmit_skb) < 0)
-		goto stop_queue;
-
-	vi->last_xmit_skb = NULL;
-
 	/* Put new one in send queue and do transmit */
-	if (likely(skb)) {
-		__skb_queue_head(&vi->send, skb);
-		if (xmit_skb(vi, skb) < 0) {
-			vi->last_xmit_skb = skb;
-			skb = NULL;
-			goto stop_queue;
+	__skb_queue_head(&vi->send, skb);
+	capacity = xmit_skb(vi, skb);
+
+	/* This can happen with OOM and indirect buffers. */
+	if (unlikely(capacity < 0)) {
+		netif_stop_queue(dev);
+		dev_warn(&dev->dev, "Unexpected full queue\n");
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			vi->svq->vq_ops->disable_cb(vi->svq);
+			netif_start_queue(dev);
+			goto again;
+		}
+		return NETDEV_TX_BUSY;
+	}
+
+	vi->svq->vq_ops->kick(vi->svq);
+	/* Don't wait up for transmitted skbs to be freed. */
+	skb_orphan(skb);
+	nf_reset(skb);
+
+	/* Apparently nice girls don't return TX_BUSY; stop the queue
+	 * before it gets out of hand.  Naturally, this wastes entries. */
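+	/* 2+MAX_SKB_FRAGS is the worst case for a single skb: one
+	 * scatterlist entry for the virtio header, one for the linear
+	 * data and up to MAX_SKB_FRAGS for the page fragments. */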
+	if (capacity < 2+MAX_SKB_FRAGS) {
+		netif_stop_queue(dev);
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			/* More just got used, free them then recheck. */
+			capacity += free_old_xmit_skbs(vi);
+			if (capacity >= 2+MAX_SKB_FRAGS) {
+				netif_start_queue(dev);
+				vi->svq->vq_ops->disable_cb(vi->svq);
+			}
 		}
 	}
-done:
-	vi->svq->vq_ops->kick(vi->svq);
+
 	return NETDEV_TX_OK;
-
-stop_queue:
-	pr_debug("%s: virtio not prepared to send\n", dev->name);
-	netif_stop_queue(dev);
-
-	/* Activate callback for using skbs: if this returns false it
-	 * means some were used in the meantime. */
-	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-		vi->svq->vq_ops->disable_cb(vi->svq);
-		netif_start_queue(dev);
-		goto again;
-	}
-	if (skb) {
-		/* Drop this skb: we only queue one. */
-		vi->dev->stats.tx_dropped++;
-		kfree_skb(skb);
-	}
-	goto done;
 }
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -925,10 +885,6 @@
 	vi->pages = NULL;
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
-	/* If they give us a callback when all buffers are done, we don't need
-	 * the timer. */
-	vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
-
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
 	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -960,11 +916,6 @@
 	skb_queue_head_init(&vi->recv);
 	skb_queue_head_init(&vi->send);
 
-	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
-
-	if (!vi->free_in_tasklet)
-		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
-
 	err = register_netdev(dev);
 	if (err) {
 		pr_debug("virtio_net: registering device failed\n");
@@ -1005,9 +956,6 @@
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
-	if (!vi->free_in_tasklet)
-		del_timer_sync(&vi->xmit_free_timer);
-
 	/* Free our skbs in send and recv queues, if any. */
 	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
 		kfree_skb(skb);
@@ -1041,7 +989,6 @@
 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
-	VIRTIO_F_NOTIFY_ON_EMPTY,
 };
 
 static struct virtio_driver virtio_net = {
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 6994a0f..80f3525 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,6 +2,7 @@
 	tristate "CIFS support (advanced network filesystem, SMBFS successor)"
 	depends on INET
 	select NLS
+	select SLOW_WORK
 	help
 	  This is the client VFS module for the Common Internet File System
 	  (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 90c5b39..9a5e4f5 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,9 +64,6 @@
 unsigned int extended_security = CIFSSEC_DEF;
 /* unsigned int ntlmv2_support = 0; */
 unsigned int sign_CIFS_PDUs = 1;
-extern struct task_struct *oplockThread; /* remove sparse warning */
-struct task_struct *oplockThread = NULL;
-/* extern struct task_struct * dnotifyThread; remove sparse warning */
 static const struct super_operations cifs_super_ops;
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 module_param(CIFSMaxBufSize, int, 0);
@@ -972,89 +969,12 @@
 	kmem_cache_destroy(cifs_oplock_cachep);
 }
 
-static int cifs_oplock_thread(void *dummyarg)
-{
-	struct oplock_q_entry *oplock_item;
-	struct cifsTconInfo *pTcon;
-	struct inode *inode;
-	__u16  netfid;
-	int rc, waitrc = 0;
-
-	set_freezable();
-	do {
-		if (try_to_freeze())
-			continue;
-
-		spin_lock(&cifs_oplock_lock);
-		if (list_empty(&cifs_oplock_list)) {
-			spin_unlock(&cifs_oplock_lock);
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(39*HZ);
-		} else {
-			oplock_item = list_entry(cifs_oplock_list.next,
-						struct oplock_q_entry, qhead);
-			cFYI(1, ("found oplock item to write out"));
-			pTcon = oplock_item->tcon;
-			inode = oplock_item->pinode;
-			netfid = oplock_item->netfid;
-			spin_unlock(&cifs_oplock_lock);
-			DeleteOplockQEntry(oplock_item);
-			/* can not grab inode sem here since it would
-				deadlock when oplock received on delete
-				since vfs_unlink holds the i_mutex across
-				the call */
-			/* mutex_lock(&inode->i_mutex);*/
-			if (S_ISREG(inode->i_mode)) {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-				if (CIFS_I(inode)->clientCanCacheAll == 0)
-					break_lease(inode, FMODE_READ);
-				else if (CIFS_I(inode)->clientCanCacheRead == 0)
-					break_lease(inode, FMODE_WRITE);
-#endif
-				rc = filemap_fdatawrite(inode->i_mapping);
-				if (CIFS_I(inode)->clientCanCacheRead == 0) {
-					waitrc = filemap_fdatawait(
-							      inode->i_mapping);
-					invalidate_remote_inode(inode);
-				}
-				if (rc == 0)
-					rc = waitrc;
-			} else
-				rc = 0;
-			/* mutex_unlock(&inode->i_mutex);*/
-			if (rc)
-				CIFS_I(inode)->write_behind_rc = rc;
-			cFYI(1, ("Oplock flush inode %p rc %d",
-				inode, rc));
-
-				/* releasing stale oplock after recent reconnect
-				of smb session using a now incorrect file
-				handle is not a data integrity issue but do
-				not bother sending an oplock release if session
-				to server still is disconnected since oplock
-				already released by the server in that case */
-			if (!pTcon->need_reconnect) {
-				rc = CIFSSMBLock(0, pTcon, netfid,
-						0 /* len */ , 0 /* offset */, 0,
-						0, LOCKING_ANDX_OPLOCK_RELEASE,
-						false /* wait flag */);
-				cFYI(1, ("Oplock release rc = %d", rc));
-			}
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(1);  /* yield in case q were corrupt */
-		}
-	} while (!kthread_should_stop());
-
-	return 0;
-}
-
 static int __init
 init_cifs(void)
 {
 	int rc = 0;
 	cifs_proc_init();
 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
-	INIT_LIST_HEAD(&cifs_oplock_list);
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	INIT_LIST_HEAD(&GlobalDnotifyReqList);
 	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
@@ -1083,7 +1003,6 @@
 	rwlock_init(&GlobalSMBSeslock);
 	rwlock_init(&cifs_tcp_ses_lock);
 	spin_lock_init(&GlobalMid_Lock);
-	spin_lock_init(&cifs_oplock_lock);
 
 	if (cifs_max_pending < 2) {
 		cifs_max_pending = 2;
@@ -1118,16 +1037,13 @@
 	if (rc)
 		goto out_unregister_key_type;
 #endif
-	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
-	if (IS_ERR(oplockThread)) {
-		rc = PTR_ERR(oplockThread);
-		cERROR(1, ("error %d create oplock thread", rc));
-		goto out_unregister_dfs_key_type;
-	}
+	rc = slow_work_register_user();
+	if (rc)
+		goto out_unregister_resolver_key;
 
 	return 0;
 
- out_unregister_dfs_key_type:
+ out_unregister_resolver_key:
 #ifdef CONFIG_CIFS_DFS_UPCALL
 	unregister_key_type(&key_type_dns_resolver);
  out_unregister_key_type:
@@ -1164,7 +1080,6 @@
 	cifs_destroy_inodecache();
 	cifs_destroy_mids();
 	cifs_destroy_request_bufs();
-	kthread_stop(oplockThread);
 }
 
 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6cfc81a..5d0fde1 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -18,6 +18,7 @@
  */
 #include <linux/in.h>
 #include <linux/in6.h>
+#include <linux/slow-work.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
 /*
@@ -346,14 +347,16 @@
 	/* lock scope id (0 if none) */
 	struct file *pfile; /* needed for writepage */
 	struct inode *pInode; /* needed for oplock break */
+	struct vfsmount *mnt;
 	struct mutex lock_mutex;
 	struct list_head llist; /* list of byte range locks we have. */
 	bool closePend:1;	/* file is marked to close */
 	bool invalidHandle:1;	/* file closed via session abend */
-	bool messageMode:1;	/* for pipes: message vs byte mode */
+	bool oplock_break_cancelled:1;
 	atomic_t count;		/* reference count */
 	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
 	struct cifs_search_info srch_inf;
+	struct slow_work oplock_break; /* slow_work job for oplock breaks */
 };
 
 /* Take a reference on the file private data */
@@ -365,8 +368,10 @@
 /* Release a reference on the file private data */
 static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
-	if (atomic_dec_and_test(&cifs_file->count))
+	if (atomic_dec_and_test(&cifs_file->count)) {
+		iput(cifs_file->pInode);
 		kfree(cifs_file);
+	}
 }
 
 /*
@@ -382,7 +387,6 @@
 	unsigned long time;	/* jiffies of last update/check of inode */
 	bool clientCanCacheRead:1;	/* read oplock */
 	bool clientCanCacheAll:1;	/* read and writebehind oplock */
-	bool oplockPending:1;
 	bool delete_pending:1;		/* DELETE_ON_CLOSE is set */
 	u64  server_eof;		/* current file size on server */
 	u64  uniqueid;			/* server inode number */
@@ -585,9 +589,9 @@
 #define   CIFSSEC_MUST_LANMAN	0x10010
 #define   CIFSSEC_MUST_PLNTXT	0x20020
 #ifdef CONFIG_CIFS_UPCALL
-#define   CIFSSEC_MASK          0xAF0AF /* allows weak security but also krb5 */
+#define   CIFSSEC_MASK          0xBF0BF /* allows weak security but also krb5 */
 #else
-#define   CIFSSEC_MASK          0xA70A7 /* current flags supported if weak */
+#define   CIFSSEC_MASK          0xB70B7 /* current flags supported if weak */
 #endif /* UPCALL */
 #else /* do not allow weak pw hash */
 #ifdef CONFIG_CIFS_UPCALL
@@ -669,12 +673,6 @@
  */
 GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
 
-/* Global list of oplocks */
-GLOBAL_EXTERN struct list_head cifs_oplock_list;
-
-/* Protects the cifs_oplock_list */
-GLOBAL_EXTERN spinlock_t cifs_oplock_lock;
-
 /* Outstanding dir notify requests */
 GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
 /* DirNotify response queue */
@@ -725,3 +723,4 @@
 GLOBAL_EXTERN unsigned int cifs_min_small;  /* min size of small buf pool */
 GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
 
+extern const struct slow_work_ops cifs_oplock_break_ops;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index da8fbf5..6928c24 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -86,18 +86,17 @@
 			     const int stage,
 			     const struct nls_table *nls_cp);
 extern __u16 GetNextMid(struct TCP_Server_Info *server);
-extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
-						 struct cifsTconInfo *);
-extern void DeleteOplockQEntry(struct oplock_q_entry *);
-extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
 				      int offset);
 
+extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
+				__u16 fileHandle, struct file *file,
+				struct vfsmount *mnt, unsigned int oflags);
 extern int cifs_posix_open(char *full_path, struct inode **pinode,
-			   struct super_block *sb, int mode, int oflags,
-			   int *poplock, __u16 *pnetfid, int xid);
+			   struct vfsmount *mnt, int mode, int oflags,
+			   __u32 *poplock, __u16 *pnetfid, int xid);
 extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
 				     FILE_UNIX_BASIC_INFO *info,
 				     struct cifs_sb_info *cifs_sb);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 301e307..941441d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -94,6 +94,7 @@
 	list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
 		open_file->invalidHandle = true;
+		open_file->oplock_break_cancelled = true;
 	}
 	write_unlock(&GlobalSMBSeslock);
 	/* BB Add call to invalidate_inodes(sb) for all superblocks mounted
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d496824..43003e0 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1670,7 +1670,6 @@
 	CIFSSMBTDis(xid, tcon);
 	_FreeXid(xid);
 
-	DeleteTconOplockQEntries(tcon);
 	tconInfoFree(tcon);
 	cifs_put_smb_ses(ses);
 }
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a6424cf..627a60a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -24,6 +24,7 @@
 #include <linux/stat.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -129,44 +130,45 @@
 	return full_path;
 }
 
-static void
-cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle,
-			struct cifsTconInfo *tcon, bool write_only)
+struct cifsFileInfo *
+cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
+		  struct file *file, struct vfsmount *mnt, unsigned int oflags)
 {
 	int oplock = 0;
 	struct cifsFileInfo *pCifsFile;
 	struct cifsInodeInfo *pCifsInode;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
 
 	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
-
 	if (pCifsFile == NULL)
-		return;
+		return pCifsFile;
 
 	if (oplockEnabled)
 		oplock = REQ_OPLOCK;
 
 	pCifsFile->netfid = fileHandle;
 	pCifsFile->pid = current->tgid;
-	pCifsFile->pInode = newinode;
+	pCifsFile->pInode = igrab(newinode);
+	pCifsFile->mnt = mnt;
+	pCifsFile->pfile = file;
 	pCifsFile->invalidHandle = false;
 	pCifsFile->closePend = false;
 	mutex_init(&pCifsFile->fh_mutex);
 	mutex_init(&pCifsFile->lock_mutex);
 	INIT_LIST_HEAD(&pCifsFile->llist);
 	atomic_set(&pCifsFile->count, 1);
+	slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops);
 
-	/* set the following in open now
-			pCifsFile->pfile = file; */
 	write_lock(&GlobalSMBSeslock);
-	list_add(&pCifsFile->tlist, &tcon->openFileList);
+	list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
 	pCifsInode = CIFS_I(newinode);
 	if (pCifsInode) {
 		/* if readable file instance put first in list*/
-		if (write_only)
+		if (oflags & FMODE_READ)
+			list_add(&pCifsFile->flist, &pCifsInode->openFileList);
+		else
 			list_add_tail(&pCifsFile->flist,
 				      &pCifsInode->openFileList);
-		else
-			list_add(&pCifsFile->flist, &pCifsInode->openFileList);
 
 		if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
 			pCifsInode->clientCanCacheAll = true;
@@ -176,18 +178,18 @@
 				pCifsInode->clientCanCacheRead = true;
 	}
 	write_unlock(&GlobalSMBSeslock);
+
+	return pCifsFile;
 }
 
 int cifs_posix_open(char *full_path, struct inode **pinode,
-		    struct super_block *sb, int mode, int oflags,
-		    int *poplock, __u16 *pnetfid, int xid)
+		    struct vfsmount *mnt, int mode, int oflags,
+		    __u32 *poplock, __u16 *pnetfid, int xid)
 {
 	int rc;
-	__u32 oplock;
-	bool write_only = false;
 	FILE_UNIX_BASIC_INFO *presp_data;
 	__u32 posix_flags = 0;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
 	struct cifs_fattr fattr;
 
 	cFYI(1, ("posix open %s", full_path));
@@ -223,12 +225,9 @@
 	if (oflags & O_DIRECT)
 		posix_flags |= SMB_O_DIRECT;
 
-	if (!(oflags & FMODE_READ))
-		write_only = true;
-
 	mode &= ~current_umask();
 	rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
-			pnetfid, presp_data, &oplock, full_path,
+			pnetfid, presp_data, poplock, full_path,
 			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	if (rc)
@@ -244,7 +243,7 @@
 
 	/* get new inode and set it up */
 	if (*pinode == NULL) {
-		*pinode = cifs_iget(sb, &fattr);
+		*pinode = cifs_iget(mnt->mnt_sb, &fattr);
 		if (!*pinode) {
 			rc = -ENOMEM;
 			goto posix_open_ret;
@@ -253,7 +252,7 @@
 		cifs_fattr_to_inode(*pinode, &fattr);
 	}
 
-	cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only);
+	cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
 
 posix_open_ret:
 	kfree(presp_data);
@@ -280,7 +279,7 @@
 	int rc = -ENOENT;
 	int xid;
 	int create_options = CREATE_NOT_DIR;
-	int oplock = 0;
+	__u32 oplock = 0;
 	int oflags;
 	bool posix_create = false;
 	/*
@@ -298,7 +297,6 @@
 	FILE_ALL_INFO *buf = NULL;
 	struct inode *newinode = NULL;
 	int disposition = FILE_OVERWRITE_IF;
-	bool write_only = false;
 
 	xid = GetXid();
 
@@ -323,7 +321,7 @@
 	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
-		rc = cifs_posix_open(full_path, &newinode, inode->i_sb,
+		rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
 				     mode, oflags, &oplock, &fileHandle, xid);
 		/* EIO could indicate that (posix open) operation is not
 		   supported, despite what server claimed in capability
@@ -351,11 +349,8 @@
 		desiredAccess = 0;
 		if (oflags & FMODE_READ)
 			desiredAccess |= GENERIC_READ; /* is this too little? */
-		if (oflags & FMODE_WRITE) {
+		if (oflags & FMODE_WRITE)
 			desiredAccess |= GENERIC_WRITE;
-			if (!(oflags & FMODE_READ))
-				write_only = true;
-		}
 
 		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
 			disposition = FILE_CREATE;
@@ -470,8 +465,8 @@
 		/* mknod case - do not leave file open */
 		CIFSSMBClose(xid, tcon, fileHandle);
 	} else if (!(posix_create) && (newinode)) {
-			cifs_fill_fileinfo(newinode, fileHandle,
-					cifs_sb->tcon, write_only);
+			cifs_new_fileinfo(newinode, fileHandle, NULL,
+						nd->path.mnt, oflags);
 	}
 cifs_create_out:
 	kfree(buf);
@@ -611,7 +606,7 @@
 {
 	int xid;
 	int rc = 0; /* to get around spurious gcc warning, set to zero here */
-	int oplock = 0;
+	__u32 oplock = 0;
 	__u16 fileHandle = 0;
 	bool posix_open = false;
 	struct cifs_sb_info *cifs_sb;
@@ -683,8 +678,7 @@
 		if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
 		     (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
 		     (nd->intent.open.flags & O_CREAT)) {
-			rc = cifs_posix_open(full_path, &newInode,
-					parent_dir_inode->i_sb,
+			rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
 					nd->intent.open.create_mode,
 					nd->intent.open.flags, &oplock,
 					&fileHandle, xid);
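The reworked list_add()/list_add_tail() branch in cifs_new_fileinfo() keeps
read-capable handles at the front of the inode's open-file list, so code that
needs a readable handle (the old file.c comment cites write_begin) finds one
without walking past write-only entries. A rough userspace sketch of that
ordering policy follows; the types and the FMODE_READ value are stand-ins.

#include <stdio.h>

#define FMODE_READ 0x1

struct handle { const char *name; unsigned int oflags; struct handle *next; };

static struct handle *open_list;

static void add_handle(struct handle *h)
{
	if (h->oflags & FMODE_READ) {		/* list_add(): head */
		h->next = open_list;
		open_list = h;
	} else {				/* list_add_tail(): tail */
		struct handle **p = &open_list;

		while (*p)
			p = &(*p)->next;
		h->next = NULL;
		*p = h;
	}
}

int main(void)
{
	struct handle w = { "write-only", 0 };
	struct handle r = { "read-write", FMODE_READ };

	add_handle(&w);
	add_handle(&r);
	/* the first readable handle sits at the head of the list */
	for (struct handle *h = open_list; h; h = h->next)
		printf("%s\n", h->name);
	return 0;
}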
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fa7beac..429337e 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -30,6 +30,7 @@
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/delay.h>
+#include <linux/mount.h>
 #include <asm/div64.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -39,27 +40,6 @@
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
 
-static inline struct cifsFileInfo *cifs_init_private(
-	struct cifsFileInfo *private_data, struct inode *inode,
-	struct file *file, __u16 netfid)
-{
-	memset(private_data, 0, sizeof(struct cifsFileInfo));
-	private_data->netfid = netfid;
-	private_data->pid = current->tgid;
-	mutex_init(&private_data->fh_mutex);
-	mutex_init(&private_data->lock_mutex);
-	INIT_LIST_HEAD(&private_data->llist);
-	private_data->pfile = file; /* needed for writepage */
-	private_data->pInode = inode;
-	private_data->invalidHandle = false;
-	private_data->closePend = false;
-	/* Initialize reference count to one.  The private data is
-	freed on the release of the last reference */
-	atomic_set(&private_data->count, 1);
-
-	return private_data;
-}
-
 static inline int cifs_convert_flags(unsigned int flags)
 {
 	if ((flags & O_ACCMODE) == O_RDONLY)
@@ -123,9 +103,11 @@
 }
 
 /* all arguments to this function must be checked for validity in caller */
-static inline int cifs_posix_open_inode_helper(struct inode *inode,
-			struct file *file, struct cifsInodeInfo *pCifsInode,
-			struct cifsFileInfo *pCifsFile, int oplock, u16 netfid)
+static inline int
+cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
+			     struct cifsInodeInfo *pCifsInode,
+			     struct cifsFileInfo *pCifsFile, __u32 oplock,
+			     u16 netfid)
 {
 
 	write_lock(&GlobalSMBSeslock);
@@ -219,17 +201,6 @@
 	struct timespec temp;
 	int rc;
 
-	/* want handles we can use to read with first
-	   in the list so we do not have to walk the
-	   list to search for one in write_begin */
-	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
-		list_add_tail(&pCifsFile->flist,
-			      &pCifsInode->openFileList);
-	} else {
-		list_add(&pCifsFile->flist,
-			 &pCifsInode->openFileList);
-	}
-	write_unlock(&GlobalSMBSeslock);
 	if (pCifsInode->clientCanCacheRead) {
 		/* we have the inode open somewhere else
 		   no need to discard cache data */
@@ -279,7 +250,8 @@
 int cifs_open(struct inode *inode, struct file *file)
 {
 	int rc = -EACCES;
-	int xid, oplock;
+	int xid;
+	__u32 oplock;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *pCifsFile;
@@ -324,7 +296,7 @@
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
 		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
 		/* can not refresh inode info since size could be stale */
-		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
+		rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
 				     cifs_sb->mnt_file_mode /* ignored */,
 				     oflags, &oplock, &netfid, xid);
 		if (rc == 0) {
@@ -414,24 +386,17 @@
 		cFYI(1, ("cifs_open returned 0x%x", rc));
 		goto out;
 	}
-	file->private_data =
-		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+
+	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
+					file->f_flags);
+	file->private_data = pCifsFile;
 	if (file->private_data == NULL) {
 		rc = -ENOMEM;
 		goto out;
 	}
-	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
-	write_lock(&GlobalSMBSeslock);
-	list_add(&pCifsFile->tlist, &tcon->openFileList);
 
-	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-	if (pCifsInode) {
-		rc = cifs_open_inode_helper(inode, file, pCifsInode,
-					    pCifsFile, tcon,
-					    &oplock, buf, full_path, xid);
-	} else {
-		write_unlock(&GlobalSMBSeslock);
-	}
+	rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
+				    &oplock, buf, full_path, xid);
 
 	if (oplock & CIFS_CREATE_ACTION) {
 		/* time to set mode which we can not set earlier due to
@@ -474,7 +439,8 @@
 static int cifs_reopen_file(struct file *file, bool can_flush)
 {
 	int rc = -EACCES;
-	int xid, oplock;
+	int xid;
+	__u32 oplock;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *pCifsFile;
@@ -543,7 +509,7 @@
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
 		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
 		/* can not refresh inode info since size could be stale */
-		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
+		rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
 				     cifs_sb->mnt_file_mode /* ignored */,
 				     oflags, &oplock, &netfid, xid);
 		if (rc == 0) {
@@ -2308,6 +2274,73 @@
 	return rc;
 }
 
+static void
+cifs_oplock_break(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	struct inode *inode = cfile->pInode;
+	struct cifsInodeInfo *cinode = CIFS_I(inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
+	int rc, waitrc = 0;
+
+	if (inode && S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+		if (cinode->clientCanCacheAll == 0)
+			break_lease(inode, FMODE_READ);
+		else if (cinode->clientCanCacheRead == 0)
+			break_lease(inode, FMODE_WRITE);
+#endif
+		rc = filemap_fdatawrite(inode->i_mapping);
+		if (cinode->clientCanCacheRead == 0) {
+			waitrc = filemap_fdatawait(inode->i_mapping);
+			invalidate_remote_inode(inode);
+		}
+		if (!rc)
+			rc = waitrc;
+		if (rc)
+			cinode->write_behind_rc = rc;
+		cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
+	}
+
+	/*
+	 * Releasing a stale oplock after a recent reconnect of the SMB session
+	 * (using a now-incorrect file handle) is not a data integrity issue.
+	 * But don't bother sending an oplock release while the session to the
+	 * server is still disconnected: the server already released the oplock.
+	 */
+	if (!cfile->closePend && !cfile->oplock_break_cancelled) {
+		rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
+				 LOCKING_ANDX_OPLOCK_RELEASE, false);
+		cFYI(1, ("Oplock release rc = %d", rc));
+	}
+}
+
+static int
+cifs_oplock_break_get(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	mntget(cfile->mnt);
+	cifsFileInfo_get(cfile);
+	return 0;
+}
+
+static void
+cifs_oplock_break_put(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	mntput(cfile->mnt);
+	cifsFileInfo_put(cfile);
+}
+
+const struct slow_work_ops cifs_oplock_break_ops = {
+	.get_ref	= cifs_oplock_break_get,
+	.put_ref	= cifs_oplock_break_put,
+	.execute	= cifs_oplock_break,
+};
+
 const struct address_space_operations cifs_addr_ops = {
 	.readpage = cifs_readpage,
 	.readpages = cifs_readpages,
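The slow_work callbacks above never receive a cifsFileInfo pointer directly;
each one recovers the owning structure from the embedded slow_work member via
container_of(), so the work item needs no back-pointer of its own. A
self-contained sketch of that pattern, with userspace stand-ins for the
kernel structures:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };		/* stand-in for struct slow_work */

struct fileinfo {
	int netfid;
	struct work oplock_break;	/* embedded, like in cifsFileInfo */
};

static void execute(struct work *w)
{
	/* the callback only sees the member, yet reaches the container */
	struct fileinfo *f = container_of(w, struct fileinfo, oplock_break);

	printf("breaking oplock on fid %d\n", f->netfid);
}

int main(void)
{
	struct fileinfo f = { .netfid = 42 };

	execute(&f.oplock_break);
	return 0;
}

Because get_ref/put_ref pin both the vfsmount (mntget) and the cifsFileInfo
(cifsFileInfo_get), everything cifs_oplock_break() touches through the
recovered pointer is guaranteed to outlive the queued work.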
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e079a91..0241b25 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,7 +32,6 @@
 
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
-extern struct task_struct *oplockThread;
 
 /* The xid serves as a useful identifier for each incoming vfs request,
    in a similar way to the mid which is useful to track each sent smb,
@@ -500,6 +499,7 @@
 	struct cifsTconInfo *tcon;
 	struct cifsInodeInfo *pCifsInode;
 	struct cifsFileInfo *netfile;
+	int rc;
 
 	cFYI(1, ("Checking for oplock break or dnotify response"));
 	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
@@ -562,30 +562,40 @@
 				continue;
 
 			cifs_stats_inc(&tcon->num_oplock_brks);
-			write_lock(&GlobalSMBSeslock);
+			read_lock(&GlobalSMBSeslock);
 			list_for_each(tmp2, &tcon->openFileList) {
 				netfile = list_entry(tmp2, struct cifsFileInfo,
 						     tlist);
 				if (pSMB->Fid != netfile->netfid)
 					continue;
 
-				write_unlock(&GlobalSMBSeslock);
-				read_unlock(&cifs_tcp_ses_lock);
+				/*
+				 * don't do anything if the file is about
+				 * to be closed anyway.
+				 */
+				if (netfile->closePend) {
+					read_unlock(&GlobalSMBSeslock);
+					read_unlock(&cifs_tcp_ses_lock);
+					return true;
+				}
+
 				cFYI(1, ("file id match, oplock break"));
 				pCifsInode = CIFS_I(netfile->pInode);
 				pCifsInode->clientCanCacheAll = false;
 				if (pSMB->OplockLevel == 0)
 					pCifsInode->clientCanCacheRead = false;
-				pCifsInode->oplockPending = true;
-				AllocOplockQEntry(netfile->pInode,
-						  netfile->netfid, tcon);
-				cFYI(1, ("about to wake up oplock thread"));
-				if (oplockThread)
-					wake_up_process(oplockThread);
-
+				rc = slow_work_enqueue(&netfile->oplock_break);
+				if (rc) {
+					cERROR(1, ("failed to enqueue oplock "
+						   "break: %d\n", rc));
+				} else {
+					netfile->oplock_break_cancelled = false;
+				}
+				read_unlock(&GlobalSMBSeslock);
+				read_unlock(&cifs_tcp_ses_lock);
 				return true;
 			}
-			write_unlock(&GlobalSMBSeslock);
+			read_unlock(&GlobalSMBSeslock);
 			read_unlock(&cifs_tcp_ses_lock);
 			cFYI(1, ("No matching file for oplock break"));
 			return true;
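With the actual oplock handling deferred to the queued slow_work item, the
oplock-break scan in misc.c only reads tcon->openFileList, which is why the
hunk above downgrades write_lock()/write_unlock() to the read-side variants:
concurrent scans no longer serialize against each other. A pthread sketch of
the same read-side locking (illustrative names; build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
static int open_fids[] = { 7, 42, 99 };

static int find_fid(int fid)
{
	int found = 0;
	size_t i;

	/* read_lock(&GlobalSMBSeslock): many readers may scan at once */
	pthread_rwlock_rdlock(&list_lock);
	for (i = 0; i < sizeof(open_fids) / sizeof(open_fids[0]); i++)
		if (open_fids[i] == fid)
			found = 1;
	pthread_rwlock_unlock(&list_lock);
	return found;
}

int main(void)
{
	printf("fid 42 %s\n", find_fid(42) ? "matched" : "not found");
	return 0;
}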
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f823a4a..1f098ca 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -146,7 +146,7 @@
 	}
 }
 
-void
+static void
 cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
 		       struct cifs_sb_info *cifs_sb)
 {
@@ -161,7 +161,7 @@
 	cifs_fill_common_info(fattr, cifs_sb);
 }
 
-void
+static void
 cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
 		       struct cifs_sb_info *cifs_sb)
 {
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1da4ab2..07b8e71 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -103,56 +103,6 @@
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
-struct oplock_q_entry *
-AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-	if ((pinode == NULL) || (tcon == NULL)) {
-		cERROR(1, ("Null parms passed to AllocOplockQEntry"));
-		return NULL;
-	}
-	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-						       GFP_KERNEL);
-	if (temp == NULL)
-		return temp;
-	else {
-		temp->pinode = pinode;
-		temp->tcon = tcon;
-		temp->netfid = fid;
-		spin_lock(&cifs_oplock_lock);
-		list_add_tail(&temp->qhead, &cifs_oplock_list);
-		spin_unlock(&cifs_oplock_lock);
-	}
-	return temp;
-}
-
-void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
-{
-	spin_lock(&cifs_oplock_lock);
-    /* should we check if list empty first? */
-	list_del(&oplockEntry->qhead);
-	spin_unlock(&cifs_oplock_lock);
-	kmem_cache_free(cifs_oplock_cachep, oplockEntry);
-}
-
-
-void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-
-	if (tcon == NULL)
-		return;
-
-	spin_lock(&cifs_oplock_lock);
-	list_for_each_entry(temp, &cifs_oplock_list, qhead) {
-		if ((temp->tcon) && (temp->tcon == tcon)) {
-			list_del(&temp->qhead);
-			kmem_cache_free(cifs_oplock_cachep, temp);
-		}
-	}
-	spin_unlock(&cifs_oplock_lock);
-}
-
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fb61178..9d5360c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -250,9 +250,11 @@
  *   completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+			 long nr_pages)
 {
 	struct wb_writeback_args args = {
+		.sb		= sb,
 		.sync_mode	= WB_SYNC_NONE,
 		.nr_pages	= nr_pages,
 		.range_cyclic	= 1,
@@ -1206,7 +1208,7 @@
 	nr_to_write = nr_dirty + nr_unstable +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	bdi_start_writeback(sb->s_bdi, nr_to_write);
+	bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 0ee33c2..b449e73 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -101,7 +101,8 @@
 		const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
+				long nr_pages);
 int bdi_writeback_task(struct bdi_writeback *wb);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 69b5fba..a3b1409 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -596,7 +596,7 @@
 	    (!laptop_mode && ((global_page_state(NR_FILE_DIRTY)
 			       + global_page_state(NR_UNSTABLE_NFS))
 					  > background_thresh)))
-		bdi_start_writeback(bdi, 0);
+		bdi_start_writeback(bdi, NULL, 0);
 }
 
 void set_page_dirty_balance(struct page *page, int page_mkwrite)
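bdi_start_writeback() now threads an optional super_block through to struct
wb_writeback_args: writeback_inodes_sb() passes its own sb so only that
filesystem's inodes get written back, while the mm/page-writeback.c caller
passes NULL to mean "any superblock on this bdi". A tiny sketch of that
optional-filter convention, with userspace stand-ins:

#include <stdio.h>

struct super_block { const char *id; };

struct wb_args {
	struct super_block *sb;		/* NULL selects every superblock */
	long nr_pages;
};

static void start_writeback(struct super_block *sb, long nr_pages)
{
	struct wb_args args = { .sb = sb, .nr_pages = nr_pages };

	printf("writeback %ld pages, sb=%s\n", args.nr_pages,
	       args.sb ? args.sb->id : "(any)");
}

int main(void)
{
	struct super_block sb = { "cifs" };

	start_writeback(&sb, 1024);	/* writeback_inodes_sb() style */
	start_writeback(NULL, 0);	/* page-writeback caller style */
	return 0;
}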
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index d6b1b05..fbcac76 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -358,6 +358,7 @@
 	ax25_dev *ax25_dev;
 	ax25_cb *ax25;
 	unsigned int k;
+	int ret = 0;
 
 	if (copy_from_user(&ax25_ctl, arg, sizeof(ax25_ctl)))
 		return -EFAULT;
@@ -388,57 +389,63 @@
 	case AX25_WINDOW:
 		if (ax25->modulus == AX25_MODULUS) {
 			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 7)
-				return -EINVAL;
+				goto einval_put;
 		} else {
 			if (ax25_ctl.arg < 1 || ax25_ctl.arg > 63)
-				return -EINVAL;
+				goto einval_put;
 		}
 		ax25->window = ax25_ctl.arg;
 		break;
 
 	case AX25_T1:
 		if (ax25_ctl.arg < 1)
-			return -EINVAL;
+			goto einval_put;
 		ax25->rtt = (ax25_ctl.arg * HZ) / 2;
 		ax25->t1  = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_T2:
 		if (ax25_ctl.arg < 1)
-			return -EINVAL;
+			goto einval_put;
 		ax25->t2 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_N2:
 		if (ax25_ctl.arg < 1 || ax25_ctl.arg > 31)
-			return -EINVAL;
+			goto einval_put;
 		ax25->n2count = 0;
 		ax25->n2 = ax25_ctl.arg;
 		break;
 
 	case AX25_T3:
 		if (ax25_ctl.arg < 0)
-			return -EINVAL;
+			goto einval_put;
 		ax25->t3 = ax25_ctl.arg * HZ;
 		break;
 
 	case AX25_IDLE:
 		if (ax25_ctl.arg < 0)
-			return -EINVAL;
+			goto einval_put;
 		ax25->idle = ax25_ctl.arg * 60 * HZ;
 		break;
 
 	case AX25_PACLEN:
 		if (ax25_ctl.arg < 16 || ax25_ctl.arg > 65535)
-			return -EINVAL;
+			goto einval_put;
 		ax25->paclen = ax25_ctl.arg;
 		break;
 
 	default:
-		return -EINVAL;
+		goto einval_put;
 	  }
 
-	return 0;
+out_put:
+	ax25_cb_put(ax25);
+	return ret;
+
+einval_put:
+	ret = -EINVAL;
+	goto out_put;
 }
 
 static void ax25_fillin_cb_from_dev(ax25_cb *ax25, ax25_dev *ax25_dev)
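The af_ax25.c hunk converts every early "return -EINVAL" into a jump to a
common exit so that the reference held on the ax25_cb is always dropped via
ax25_cb_put(); the straight returns had been skipping it. A minimal sketch of
that single-exit cleanup pattern (hypothetical names, not the ax25 API):

#include <stdio.h>

#define EINVAL 22

struct cb { int refs; };

static void cb_get(struct cb *c) { c->refs++; }
static void cb_put(struct cb *c) { c->refs--; }

static int set_window(struct cb *c, int arg)
{
	int ret = 0;

	cb_get(c);
	if (arg < 1 || arg > 7)
		goto einval_put;	/* was: return -EINVAL (leaked the ref) */
	/* ... apply arg ... */
out_put:
	cb_put(c);			/* runs on every exit path */
	return ret;

einval_put:
	ret = -EINVAL;
	goto out_put;
}

int main(void)
{
	struct cb c = { 0 };

	printf("ret=%d refs=%d\n", set_window(&c, 99), c.refs);
	return 0;
}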