Merge branch 'sfc-2.6.39' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-2.6
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679..a3c2aab 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -328,7 +328,8 @@
 * processing to finish, then directly poll (and ack) the eventq.
  * Finally reenable NAPI and interrupts.
  *
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test.  It must not
+ * deliver any packets up the stack as this can result in deadlock.
  */
 void efx_process_channel_now(struct efx_channel *channel)
 {
@@ -336,6 +337,7 @@
 
 	BUG_ON(channel->channel >= efx->n_channels);
 	BUG_ON(!channel->enabled);
+	BUG_ON(!efx->loopback_selftest);
 
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@
 	 * restart the transmit interface early so the watchdog timer stops */
 	efx_start_port(efx);
 
-	if (efx_dev_registered(efx))
+	if (efx_dev_registered(efx) && !efx->port_inhibited)
 		netif_tx_wake_all_queues(efx->net_dev);
 
 	efx_for_each_channel(channel, efx)
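
The two efx.c hunks belong together: efx_process_channel_now() is now reserved for the loopback self-test (hence the new BUG_ON), and the start path no longer wakes the TX queues while the port is inhibited, so the self-test keeps exclusive use of them. A minimal userspace sketch of that guard, with invented names (struct nic_state, try_wake_tx_queues) rather than the driver's API:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Toy model of the two flags consulted before waking the TX queues. */
	struct nic_state {
		bool dev_registered;   /* netdev registered with the stack */
		bool port_inhibited;   /* e.g. an offline/loopback self-test owns the port */
	};

	/* Wake the TX queues only when it is safe to hand packets to the
	 * stack; while the port is inhibited the queues stay stopped so the
	 * watchdog and the self-test do not race. */
	static bool try_wake_tx_queues(const struct nic_state *s)
	{
		if (s->dev_registered && !s->port_inhibited) {
			/* netif_tx_wake_all_queues(net_dev) would go here */
			return true;
		}
		return false;
	}

	int main(void)
	{
		struct nic_state s = { .dev_registered = true, .port_inhibited = true };

		assert(!try_wake_tx_queues(&s));   /* self-test running: stay stopped */
		s.port_inhibited = false;
		assert(try_wake_tx_queues(&s));    /* normal operation: wake */
		puts("ok");
		return 0;
	}
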
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2e..cc97880 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
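
The io.h hunks insert rmb() so the component 32-bit reads of one wide register are issued in ascending address order; without the barrier the first read is not guaranteed to complete before the later ones, and the value latched by the bus interface can be torn. A rough userspace analogue is sketched below, with a C11 acquire fence standing in for the kernel's rmb() and a volatile array standing in for real MMIO; fake_reg and read_oword are made-up names, not the driver's accessors:

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Pretend this is a memory-mapped 128-bit register, exposed as four
	 * 32-bit words that must be read lowest-address-first. */
	static volatile uint32_t fake_reg[4] = { 0x11111111, 0x22222222,
						 0x33333333, 0x44444444 };

	static void read_oword(uint32_t out[4])
	{
		out[0] = fake_reg[0];
		/* Stand-in for rmb(): keep the first read ordered before the rest. */
		atomic_thread_fence(memory_order_acquire);
		out[1] = fake_reg[1];
		out[2] = fake_reg[2];
		out[3] = fake_reg[3];
	}

	int main(void)
	{
		uint32_t v[4];

		read_oword(v);
		printf("%08x %08x %08x %08x\n",
		       (unsigned)v[0], (unsigned)v[1], (unsigned)v[2], (unsigned)v[3]);
		return 0;
	}
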
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 9ffa9a6..191a311 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -330,7 +330,6 @@
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
  * @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
  * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@
 	unsigned int eventq_mask;
 	unsigned int eventq_read_ptr;
 	unsigned int last_eventq_read_ptr;
-	unsigned int magic_count;
 
 	unsigned int irq_count;
 	unsigned int irq_mod_score;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e839661..10f1cb7 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -84,7 +84,8 @@
 static inline efx_qword_t *efx_event(struct efx_channel *channel,
 				     unsigned int index)
 {
-	return ((efx_qword_t *) (channel->eventq.addr)) + index;
+	return ((efx_qword_t *) (channel->eventq.addr)) +
+		(index & channel->eventq_mask);
 }
 
 /* See if an event is present
@@ -673,7 +674,8 @@
 	efx_dword_t reg;
 	struct efx_nic *efx = channel->efx;
 
-	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+			     channel->eventq_read_ptr & channel->eventq_mask);
 	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
 			 channel->channel);
 }
@@ -908,7 +910,7 @@
 
 	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
 	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
-		++channel->magic_count;
+		; /* ignore */
 	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
 		/* The queue must be empty, so we won't receive any rx
 		 * events, so efx_process_channel() won't refill the
@@ -1015,8 +1017,7 @@
 		/* Clear this event by marking it all ones */
 		EFX_SET_QWORD(*p_event);
 
-		/* Increment read pointer */
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 
 		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
 
@@ -1060,6 +1061,13 @@
 	return spent;
 }
 
+/* Check whether an event is present in the eventq at the current
+ * read pointer.  Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
 
 /* Allocate buffer table entries for event queue */
 int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1173,7 @@
 	struct efx_tx_queue *tx_queue;
 	struct efx_rx_queue *rx_queue;
 	unsigned int read_ptr = channel->eventq_read_ptr;
-	unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+	unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
 
 	do {
 		efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1213,7 @@
 		 * it's ok to throw away every non-flush event */
 		EFX_SET_QWORD(*event);
 
-		read_ptr = (read_ptr + 1) & channel->eventq_mask;
+		++read_ptr;
 	} while (read_ptr != end_ptr);
 
 	channel->eventq_read_ptr = read_ptr;
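
Several nic.c hunks switch eventq_read_ptr from a pre-masked index to a free-running counter: it is only ever incremented, and the mask is applied where the counter is turned into an array index (efx_event()) or written to the hardware read-pointer register. With a power-of-two queue size this stays correct even across 32-bit wrap-around, because unsigned arithmetic is modular. A standalone sketch of that indexing scheme (ring_entry() and the sizes below are illustrative, not the driver's):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 8u                    /* must be a power of two */
	#define RING_MASK (RING_SIZE - 1u)

	static uint64_t ring[RING_SIZE];

	/* Free-running index -> slot, masking only at the point of access,
	 * like efx_event(). */
	static uint64_t *ring_entry(unsigned int index)
	{
		return &ring[index & RING_MASK];
	}

	int main(void)
	{
		/* Start near UINT_MAX to show that wrap-around is harmless. */
		unsigned int read_ptr = 0xfffffffeu;
		unsigned int i;

		for (i = 0; i < RING_SIZE; i++) {
			*ring_entry(read_ptr) = read_ptr;
			++read_ptr;             /* never masked here */
		}

		/* Each slot was written exactly once despite the wrap. */
		for (i = 0; i < RING_SIZE; i++)
			printf("slot %u = %llx\n", i,
			       (unsigned long long)ring[i]);

		assert(read_ptr == 6u);         /* wrapped past UINT_MAX as expected */
		return 0;
	}
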
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d9de1b6..a42db6e 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -184,6 +184,7 @@
 extern void efx_nic_remove_eventq(struct efx_channel *channel);
 extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
 extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
 
 /* MAC/PHY */
 extern void falcon_drain_tx_fifo(struct efx_nic *efx);
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index a0f49b3..50ad3bc 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -131,8 +131,6 @@
 static int efx_test_interrupts(struct efx_nic *efx,
 			       struct efx_self_tests *tests)
 {
-	struct efx_channel *channel;
-
 	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
 	tests->interrupt = -1;
 
@@ -140,15 +138,6 @@
 	efx->last_irq_cpu = -1;
 	smp_wmb();
 
-	/* ACK each interrupting event queue. Receiving an interrupt due to
-	 * traffic before a test event is raised is considered a pass */
-	efx_for_each_channel(channel, efx) {
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-		if (efx->last_irq_cpu >= 0)
-			goto success;
-	}
-
 	efx_nic_generate_interrupt(efx);
 
 	/* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@
 			       struct efx_self_tests *tests)
 {
 	struct efx_nic *efx = channel->efx;
-	unsigned int magic_count, count;
+	unsigned int read_ptr, count;
 
 	tests->eventq_dma[channel->channel] = -1;
 	tests->eventq_int[channel->channel] = -1;
 	tests->eventq_poll[channel->channel] = -1;
 
-	magic_count = channel->magic_count;
+	read_ptr = channel->eventq_read_ptr;
 	channel->efx->last_irq_cpu = -1;
 	smp_wmb();
 
@@ -190,10 +179,7 @@
 	do {
 		schedule_timeout_uninterruptible(HZ / 100);
 
-		if (channel->work_pending)
-			efx_process_channel_now(channel);
-
-		if (channel->magic_count != magic_count)
+		if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
 			goto eventq_ok;
 	} while (++count < 2);
 
@@ -211,8 +197,7 @@
 	}
 
 	/* Check to see if event was received even if interrupt wasn't */
-	efx_process_channel_now(channel);
-	if (channel->magic_count != magic_count) {
+	if (efx_nic_event_present(channel)) {
 		netif_err(efx, drv, efx->net_dev,
 			  "channel %d event was generated, but "
 			  "failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@
 	__efx_reconfigure_port(efx);
 	mutex_unlock(&efx->mac_lock);
 
+	netif_tx_wake_all_queues(efx->net_dev);
+
 	return rc_test;
 }
 
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 1398019..d2c85df 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -435,7 +435,8 @@
 	 * queue state. */
 	smp_mb();
 	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
-	    likely(efx->port_enabled)) {
+	    likely(efx->port_enabled) &&
+	    likely(!efx->port_inhibited)) {
 		fill_level = tx_queue->insert_count - tx_queue->read_count;
 		if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
 			EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
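
The tx.c hunk refines the usual stop/wake queue protocol: the completion path re-checks state only after smp_mb(), and now also refuses to wake the queue while the port is inhibited, since the self-test owns it then. Below is a compact single-threaded sketch of the counters-and-threshold logic, using hypothetical names (struct txq_model, complete_descriptors) and plain C in place of the netdev helpers:

	#include <stdbool.h>
	#include <stdio.h>

	#define TXQ_ENTRIES   64u
	#define TXQ_THRESHOLD (TXQ_ENTRIES / 2u)     /* wake below half full */

	struct txq_model {
		unsigned int insert_count;   /* descriptors queued (free-running) */
		unsigned int read_count;     /* descriptors completed (free-running) */
		bool stopped;                /* netif_tx_queue_stopped() stand-in */
		bool port_enabled;
		bool port_inhibited;
	};

	/* Completion path: wake only if the queue was stopped, the port is
	 * usable, and the fill level has dropped below the threshold.
	 * (The real driver issues smp_mb() before re-reading this state.) */
	static void complete_descriptors(struct txq_model *q, unsigned int n)
	{
		unsigned int fill_level;

		q->read_count += n;
		if (q->stopped && q->port_enabled && !q->port_inhibited) {
			fill_level = q->insert_count - q->read_count;
			if (fill_level < TXQ_THRESHOLD) {
				q->stopped = false;   /* netif_tx_wake_queue() */
				printf("queue woken at fill level %u\n", fill_level);
			}
		}
	}

	int main(void)
	{
		struct txq_model q = {
			.insert_count = TXQ_ENTRIES, .read_count = 0,
			.stopped = true, .port_enabled = true, .port_inhibited = true,
		};

		complete_descriptors(&q, 40);   /* port inhibited: stays stopped */
		q.port_inhibited = false;
		complete_descriptors(&q, 8);    /* fill level 16 < 32: wakes */
		return 0;
	}
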