[SCTP]: Fix a couple of races between sctp_peeloff() and sctp_rcv().

Validate and update the sk in sctp_rcv() to avoid the race where an
assoc/ep could move to a different socket after we get the sk, but before
the skb is added to the backlog.
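
One possible interleaving, as a rough sketch (illustrative only, not part
of the patch):

	CPU A: sctp_rcv()                     CPU B: peeloff
	  lookup -> rcvr (asoc/ep), sk
	  sctp_bh_lock_sock(sk)
	                                        assoc migrates to newsk
	  sk_add_backlog(sk, skb)
	    /* skb is queued on the old socket even though the
	     * association now belongs to newsk */

With this fix, sctp_rcv() re-checks rcvr->sk after taking the lock and, if
the association has moved, drops the old lock and continues with the
socket that now owns the association.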

Also migrate the skbs in the backlog queue to the new sk when doing a peeloff.
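
In sketch form (illustrative pseudocode mirroring the new
sctp_backlog_migrate(); see the hunk in net/sctp/input.c below):

	/* runs with oldsk->sk_lock.slock held */
	for each skb on oldsk->sk_backlog:
		chunk = SCTP_INPUT_CB(skb)->chunk;
		if (chunk->rcvr == &assoc->base)
			sk_add_backlog(newsk, skb);   /* belongs to the peeled-off assoc */
		else
			sk_add_backlog(oldsk, skb);   /* stays with the old socket */

Holding oldsk->sk_lock.slock across both the backlog walk and
sctp_assoc_migrate() is what closes the race: it is the same spinlock that
sctp_bh_lock_sock() takes in sctp_rcv(), so the receive path cannot queue
new skbs to the old backlog while the association is moving.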

Signed-off-by: Sridhar Samudrala <sri@us.ibm.com>
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index a553f39..e673b2c 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -175,6 +175,8 @@
 void sctp_icmp_proto_unreachable(struct sock *sk,
 				 struct sctp_association *asoc,
 				 struct sctp_transport *t);
+void sctp_backlog_migrate(struct sctp_association *assoc,
+			  struct sock *oldsk, struct sock *newsk);
 
 /*
  *  Section:  Macros, externs, and inlines
diff --git a/net/sctp/input.c b/net/sctp/input.c
index c463e40..71fd563 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -257,12 +257,21 @@
 	 */
 	sctp_bh_lock_sock(sk);
 
+	/* It is possible that the association could have moved to a different
+	 * socket if it is peeled off. If so, update the sk.
+	 */ 
+	if (sk != rcvr->sk) {
+		sctp_bh_lock_sock(rcvr->sk);
+		sctp_bh_unlock_sock(sk);
+		sk = rcvr->sk;
+	}
+
 	if (sock_owned_by_user(sk))
 		sk_add_backlog(sk, skb);
 	else
 		sctp_backlog_rcv(sk, skb);
 
-	/* Release the sock and the sock ref we took in the lookup calls. 
+	/* Release the sock and the sock ref we took in the lookup calls.
 	 * The asoc/ep ref will be released in sctp_backlog_rcv.
 	 */
 	sctp_bh_unlock_sock(sk);
@@ -297,6 +306,9 @@
  	struct sctp_ep_common *rcvr = NULL;
 
  	rcvr = chunk->rcvr;
+
+	BUG_TRAP(rcvr->sk == sk);
+
  	if (rcvr->dead) {
  		sctp_chunk_free(chunk);
  	} else {
@@ -313,6 +325,27 @@
         return 0;
 }
 
+void sctp_backlog_migrate(struct sctp_association *assoc,
+			  struct sock *oldsk, struct sock *newsk)
+{
+	struct sk_buff *skb;
+	struct sctp_chunk *chunk;
+
+	skb = oldsk->sk_backlog.head;
+	oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL;
+	while (skb != NULL) {
+		struct sk_buff *next = skb->next;
+
+		chunk = SCTP_INPUT_CB(skb)->chunk;
+		skb->next = NULL;
+		if (&assoc->base == chunk->rcvr)
+			sk_add_backlog(newsk, skb);
+		else
+			sk_add_backlog(oldsk, skb);
+		skb = next;
+	}
+}
+
 /* Handle icmp frag needed error. */
 void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc,
 			   struct sctp_transport *t, __u32 pmtu)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6a0b1af..fb1821d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5602,8 +5602,12 @@
 	 */
 	newsp->type = type;
 
+	spin_lock_bh(&oldsk->sk_lock.slock);
+	/* Migrate the backlog from oldsk to newsk. */
+	sctp_backlog_migrate(assoc, oldsk, newsk);
 	/* Migrate the association to the new socket. */
 	sctp_assoc_migrate(assoc, newsk);
+	spin_unlock_bh(&oldsk->sk_lock.slock);
 
 	/* If the association on the newsk is already closed before accept()
 	 * is called, set RCV_SHUTDOWN flag.