IB/qib: Fix race between qib_error_qp() and receive packet processing

When transitioning a QP to the error state, in-progress RWQEs need to
be marked complete.  This also involves releasing the reference counts
held on the memory regions referenced by the SGEs.  The locking in the
receive packet processing path wasn't sufficient to prevent
qib_error_qp() from modifying the r_sge state at the same time, leading
to kernel panics.

Fix this by holding the QP r_lock across receive processing (acquired
by the caller, elsewhere in the patch) and nesting s_lock inside it.
With r_lock held for the whole receive path, the "double check" of
QIB_PROCESS_RECV_OK after acquiring s_lock is no longer needed, so
those re-checks are removed.
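
As a minimal sketch of the locking change (illustrative, not the
literal driver code), the receive path goes from taking s_lock alone:

	spin_lock_irqsave(&qp->s_lock, flags);
	/* r_sge can still be modified by qib_error_qp() here */
	...
	spin_unlock_irqrestore(&qp->s_lock, flags);

to nesting s_lock inside r_lock, so receive-side state such as r_sge
is stable while it is in use:

	spin_lock_irqsave(&qp->r_lock, flags);	/* protects r_sge et al. */
	spin_lock(&qp->s_lock);			/* nested: send-side state */
	...
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_lock, flags);

Any path that can race with qib_error_qp() must use the same
r_lock-then-s_lock ordering to avoid deadlock.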

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 40c0a37..a093111 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -868,7 +868,7 @@
 
 /*
  * Back up requester to resend the last un-ACKed request.
- * The QP s_lock should be held and interrupts disabled.
+ * The QP r_lock and s_lock should be held and interrupts disabled.
  */
 static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait)
 {
@@ -911,7 +911,8 @@
 	struct qib_ibport *ibp;
 	unsigned long flags;
 
-	spin_lock_irqsave(&qp->s_lock, flags);
+	spin_lock_irqsave(&qp->r_lock, flags);
+	spin_lock(&qp->s_lock);
 	if (qp->s_flags & QIB_S_TIMER) {
 		ibp = to_iport(qp->ibqp.device, qp->port_num);
 		ibp->n_rc_timeouts++;
@@ -920,7 +921,8 @@
 		qib_restart_rc(qp, qp->s_last_psn + 1, 1);
 		qib_schedule_send(qp);
 	}
-	spin_unlock_irqrestore(&qp->s_lock, flags);
+	spin_unlock(&qp->s_lock);
+	spin_unlock_irqrestore(&qp->r_lock, flags);
 }
 
 /*
@@ -1414,10 +1416,6 @@
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto ack_done;
-
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
 		goto ack_done;
@@ -1661,9 +1659,6 @@
 	ibp->n_rc_dupreq++;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
-	/* Double check we can process this now that we hold the s_lock. */
-	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-		goto unlock_done;
 
 	for (i = qp->r_head_ack_queue; ; i = prev) {
 		if (i == qp->s_tail_ack_queue)
@@ -1878,9 +1873,6 @@
 	psn = be32_to_cpu(ohdr->bth[2]);
 	opcode >>= 24;
 
-	/* Prevent simultaneous processing after APM on different CPUs */
-	spin_lock(&qp->r_lock);
-
 	/*
 	 * Process responses (ACKs) before anything else.  Note that the
 	 * packet sequence number will be for something in the send work
@@ -1891,14 +1883,14 @@
 	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
 		qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn,
 				hdrsize, pmtu, rcd);
-		goto runlock;
+		return;
 	}
 
 	/* Compute 24 bits worth of difference. */
 	diff = qib_cmp24(psn, qp->r_psn);
 	if (unlikely(diff)) {
 		if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
-			goto runlock;
+			return;
 		goto send_ack;
 	}
 
@@ -2090,9 +2082,6 @@
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2146,7 +2135,7 @@
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	case OP(COMPARE_SWAP):
@@ -2165,9 +2154,6 @@
 		if (next > QIB_MAX_RDMA_ATOMIC)
 			next = 0;
 		spin_lock_irqsave(&qp->s_lock, flags);
-		/* Double check we can process this while holding the s_lock. */
-		if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
-			goto srunlock;
 		if (unlikely(next == qp->s_tail_ack_queue)) {
 			if (!qp->s_ack_queue[next].sent)
 				goto nack_inv_unlck;
@@ -2213,7 +2199,7 @@
 		qp->s_flags |= QIB_S_RESP_PENDING;
 		qib_schedule_send(qp);
 
-		goto srunlock;
+		goto sunlock;
 	}
 
 	default:
@@ -2227,7 +2213,7 @@
 	/* Send an ACK if requested or required. */
 	if (psn & (1 << 31))
 		goto send_ack;
-	goto runlock;
+	return;
 
 rnr_nak:
 	qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
@@ -2238,7 +2224,7 @@
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_op_err:
 	qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
@@ -2250,7 +2236,7 @@
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_inv_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2264,7 +2250,7 @@
 		atomic_inc(&qp->refcount);
 		list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
 	}
-	goto runlock;
+	return;
 
 nack_acc_unlck:
 	spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -2274,13 +2260,6 @@
 	qp->r_ack_psn = qp->r_psn;
 send_ack:
 	qib_send_rc_ack(qp);
-runlock:
-	spin_unlock(&qp->r_lock);
-	return;
-
-srunlock:
-	spin_unlock_irqrestore(&qp->s_lock, flags);
-	spin_unlock(&qp->r_lock);
 	return;
 
 sunlock: