Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [IRDA]: fix printk format
  [NETPOLL] netconsole: fix soft lockup when removing module
  [NETPOLL]: tx lock deadlock fix
  SCTP: lock_sock_nested in sctp_sock_migrate
  SCTP: Fix sctp_getsockopt_get_peer_addrs
  SCTP: update sctp_getsockopt helpers to allow oversized buffers
diff --git a/drivers/net/irda/irport.c b/drivers/net/irda/irport.c
index 3098960..3078c41 100644
--- a/drivers/net/irda/irport.c
+++ b/drivers/net/irda/irport.c
@@ -509,7 +509,7 @@
 	IRDA_DEBUG(0, "%s(), iir=%02x, lsr=%02x, iobase=%#x\n", 
 		   __FUNCTION__, iir, lsr, iobase);
 
-	IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%d\n", 
+	IRDA_DEBUG(0, "%s(), transmitting=%d, remain=%d, done=%td\n",
 		   __FUNCTION__, self->transmitting, self->tx_buff.len,
 		   self->tx_buff.data - self->tx_buff.head);
 
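For context on the [IRDA] hunk above: self->tx_buff.data - self->tx_buff.head is a pointer difference, i.e. a ptrdiff_t, so %d is the wrong conversion on 64-bit targets; %td is the matching one (the 't' length modifier means ptrdiff_t for printk just as for printf). A minimal userspace sketch of the same idea, not part of the patch; buf, data and done are illustrative names:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	char buf[64];
	char *data = buf + 10;
	ptrdiff_t done = data - buf;	/* same shape as tx_buff.data - tx_buff.head */

	printf("done=%td\n", done);	/* %td matches ptrdiff_t, no cast or warning */
	return 0;
}
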
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 758dafe..cf40ff9 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -72,7 +72,8 @@
 			netif_tx_unlock(dev);
 			local_irq_restore(flags);
 
-			schedule_delayed_work(&npinfo->tx_work, HZ/10);
+			if (atomic_read(&npinfo->refcnt))
+				schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
 		netif_tx_unlock(dev);
@@ -250,22 +251,23 @@
 		unsigned long flags;
 
 		local_irq_save(flags);
-		if (netif_tx_trylock(dev)) {
-			/* try until next clock tick */
-			for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
-					tries > 0; --tries) {
+		/* try until next clock tick */
+		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
+		     tries > 0; --tries) {
+			if (netif_tx_trylock(dev)) {
 				if (!netif_queue_stopped(dev))
 					status = dev->hard_start_xmit(skb, dev);
+				netif_tx_unlock(dev);
 
 				if (status == NETDEV_TX_OK)
 					break;
 
-				/* tickle device maybe there is some cleanup */
-				netpoll_poll(np);
-
-				udelay(USEC_PER_POLL);
 			}
-			netif_tx_unlock(dev);
+
+			/* tickle device maybe there is some cleanup */
+			netpoll_poll(np);
+
+			udelay(USEC_PER_POLL);
 		}
 		local_irq_restore(flags);
 	}
@@ -784,9 +786,15 @@
 			if (atomic_dec_and_test(&npinfo->refcnt)) {
 				skb_queue_purge(&npinfo->arp_tx);
 				skb_queue_purge(&npinfo->txq);
-				cancel_rearming_delayed_work(&npinfo->tx_work);
+				cancel_delayed_work(&npinfo->tx_work);
 				flush_scheduled_work();
 
+				/* clean after last, unfinished work */
+				if (!skb_queue_empty(&npinfo->txq)) {
+					struct sk_buff *skb;
+					skb = __skb_dequeue(&npinfo->txq);
+					kfree_skb(skb);
+				}
 				kfree(npinfo);
 			}
 		}
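Aside on the [NETPOLL] hunks above, not part of the patch: the rewritten loop takes netif_tx_trylock() afresh on each iteration and releases it before netpoll_poll() and udelay() run, so the device is never polled while the tx lock is held; the first hunk additionally stops re-arming tx_work once the refcount has dropped to zero during module removal. A minimal userspace sketch of the retry-with-trylock shape, with pthread_mutex_trylock() standing in for netif_tx_trylock(); every name below is an illustrative stand-in (build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int attempts;

/* Stand-in for dev->hard_start_xmit(): succeeds on the third try. */
static bool try_transmit(void)
{
	return ++attempts >= 3;
}

/* Stand-in for netpoll_poll(): a real driver would reap tx completions here. */
static void poll_device(void)
{
}

static bool send_with_retries(int tries, useconds_t delay_us)
{
	while (tries-- > 0) {
		if (pthread_mutex_trylock(&tx_lock) == 0) {
			bool ok = try_transmit();

			/* Drop the lock *before* polling/delaying, as the patch does. */
			pthread_mutex_unlock(&tx_lock);
			if (ok)
				return true;
		}
		/* Safe to poke the device here: tx_lock is not held. */
		poll_device();
		usleep(delay_us);
	}
	return false;
}

int main(void)
{
	printf("sent: %s\n", send_with_retries(10, 50) ? "yes" : "no");
	return 0;
}
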
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 6edaaa00..67861a8 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3375,12 +3375,13 @@
 	sctp_assoc_t associd;
 	int retval = 0;
 
-	if (len != sizeof(status)) {
+	if (len < sizeof(status)) {
 		retval = -EINVAL;
 		goto out;
 	}
 
-	if (copy_from_user(&status, optval, sizeof(status))) {
+	len = sizeof(status);
+	if (copy_from_user(&status, optval, len)) {
 		retval = -EFAULT;
 		goto out;
 	}
@@ -3452,12 +3453,13 @@
 	struct sctp_transport *transport;
 	int retval = 0;
 
-	if (len != sizeof(pinfo)) {
+	if (len < sizeof(pinfo)) {
 		retval = -EINVAL;
 		goto out;
 	}
 
-	if (copy_from_user(&pinfo, optval, sizeof(pinfo))) {
+	len = sizeof(pinfo);
+	if (copy_from_user(&pinfo, optval, len)) {
 		retval = -EFAULT;
 		goto out;
 	}
@@ -3523,8 +3525,11 @@
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
 				  int __user *optlen)
 {
-	if (len != sizeof(struct sctp_event_subscribe))
+	if (len < sizeof(struct sctp_event_subscribe))
 		return -EINVAL;
+	len = sizeof(struct sctp_event_subscribe);
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
 		return -EFAULT;
 	return 0;
@@ -3546,9 +3551,12 @@
 	/* Applicable to UDP-style socket only */
 	if (sctp_style(sk, TCP))
 		return -EOPNOTSUPP;
-	if (len != sizeof(int))
+	if (len < sizeof(int))
 		return -EINVAL;
-	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len))
+	len = sizeof(int);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
 		return -EFAULT;
 	return 0;
 }
@@ -3599,8 +3607,9 @@
 	int retval = 0;
 	struct sctp_association *asoc;
 
-	if (len != sizeof(sctp_peeloff_arg_t))
+	if (len < sizeof(sctp_peeloff_arg_t))
 		return -EINVAL;
+	len = sizeof(sctp_peeloff_arg_t);
 	if (copy_from_user(&peeloff, optval, len))
 		return -EFAULT;
 
@@ -3628,6 +3637,8 @@
 
 	/* Return the fd mapped to the new socket.  */
 	peeloff.sd = retval;
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &peeloff, len))
 		retval = -EFAULT;
 
@@ -3736,9 +3747,9 @@
 	struct sctp_association *asoc = NULL;
 	struct sctp_sock        *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_paddrparams))
+	if (len < sizeof(struct sctp_paddrparams))
 		return -EINVAL;
-
+	len = sizeof(struct sctp_paddrparams);
 	if (copy_from_user(&params, optval, len))
 		return -EFAULT;
 
@@ -3837,9 +3848,11 @@
 	struct sctp_association *asoc = NULL;
 	struct sctp_sock        *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_assoc_value))
+	if (len < sizeof(struct sctp_assoc_value))
 		return - EINVAL;
 
+	len = sizeof(struct sctp_assoc_value);
+
 	if (copy_from_user(&params, optval, len))
 		return -EFAULT;
 
@@ -3888,8 +3901,11 @@
  */
 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
 {
-	if (len != sizeof(struct sctp_initmsg))
+	if (len < sizeof(struct sctp_initmsg))
 		return -EINVAL;
+	len = sizeof(struct sctp_initmsg);
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
 		return -EFAULT;
 	return 0;
@@ -3904,7 +3920,7 @@
 	struct list_head *pos;
 	int cnt = 0;
 
-	if (len != sizeof(sctp_assoc_t))
+	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;
 
 	if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -3940,10 +3956,12 @@
 	struct sctp_sock *sp = sctp_sk(sk);
 	int addrlen;
 
-	if (len != sizeof(struct sctp_getaddrs_old))
+	if (len < sizeof(struct sctp_getaddrs_old))
 		return -EINVAL;
 
-	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
+	len = sizeof(struct sctp_getaddrs_old);
+
+	if (copy_from_user(&getaddrs, optval, len))
 		return -EFAULT;
 
 	if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -3966,7 +3984,9 @@
 		if (cnt >= getaddrs.addr_num) break;
 	}
 	getaddrs.addr_num = cnt;
-	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &getaddrs, len))
 		return -EFAULT;
 
 	return 0;
@@ -3999,8 +4019,7 @@
 		return -EINVAL;
 
 	to = optval + offsetof(struct sctp_getaddrs,addrs);
-	space_left = len - sizeof(struct sctp_getaddrs) -
-			offsetof(struct sctp_getaddrs,addrs);
+	space_left = len - offsetof(struct sctp_getaddrs,addrs);
 
 	list_for_each(pos, &asoc->peer.transport_addr_list) {
 		from = list_entry(pos, struct sctp_transport, transports);
@@ -4037,7 +4056,7 @@
 	rwlock_t *addr_lock;
 	int cnt = 0;
 
-	if (len != sizeof(sctp_assoc_t))
+	if (len < sizeof(sctp_assoc_t))
 		return -EINVAL;
 
 	if (copy_from_user(&id, optval, sizeof(sctp_assoc_t)))
@@ -4179,10 +4198,11 @@
 	void *buf;
 	int bytes_copied = 0;
 
-	if (len != sizeof(struct sctp_getaddrs_old))
+	if (len < sizeof(struct sctp_getaddrs_old))
 		return -EINVAL;
 
-	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
+	len = sizeof(struct sctp_getaddrs_old);
+	if (copy_from_user(&getaddrs, optval, len))
 		return -EFAULT;
 
 	if (getaddrs.addr_num <= 0) return -EINVAL;
@@ -4254,7 +4274,7 @@
 
 	/* copy the leading structure back to user */
 	getaddrs.addr_num = cnt;
-	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
+	if (copy_to_user(optval, &getaddrs, len))
 		err = -EFAULT;
 
 error:
@@ -4282,7 +4302,7 @@
 	void *addrs;
 	void *buf;
 
-	if (len <= sizeof(struct sctp_getaddrs))
+	if (len < sizeof(struct sctp_getaddrs))
 		return -EINVAL;
 
 	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
@@ -4306,8 +4326,8 @@
 	}
 
 	to = optval + offsetof(struct sctp_getaddrs,addrs);
-	space_left = len - sizeof(struct sctp_getaddrs) -
-			 offsetof(struct sctp_getaddrs,addrs);
+	space_left = len - offsetof(struct sctp_getaddrs,addrs);
+
 	addrs = kmalloc(space_left, GFP_KERNEL);
 	if (!addrs)
 		return -ENOMEM;
@@ -4379,10 +4399,12 @@
 	struct sctp_association *asoc;
 	struct sctp_sock *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_prim))
+	if (len < sizeof(struct sctp_prim))
 		return -EINVAL;
 
-	if (copy_from_user(&prim, optval, sizeof(struct sctp_prim)))
+	len = sizeof(struct sctp_prim);
+
+	if (copy_from_user(&prim, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, prim.ssp_assoc_id);
@@ -4398,7 +4420,9 @@
 	sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
 			(union sctp_addr *)&prim.ssp_addr);
 
-	if (copy_to_user(optval, &prim, sizeof(struct sctp_prim)))
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &prim, len))
 		return -EFAULT;
 
 	return 0;
@@ -4415,10 +4439,15 @@
 {
 	struct sctp_setadaptation adaptation;
 
-	if (len != sizeof(struct sctp_setadaptation))
+	if (len < sizeof(struct sctp_setadaptation))
 		return -EINVAL;
 
+	len = sizeof(struct sctp_setadaptation);
+
 	adaptation.ssb_adaptation_ind = sctp_sk(sk)->adaptation_ind;
+
+	if (put_user(len, optlen))
+		return -EFAULT;
 	if (copy_to_user(optval, &adaptation, len))
 		return -EFAULT;
 
@@ -4452,9 +4481,12 @@
 	struct sctp_association *asoc;
 	struct sctp_sock *sp = sctp_sk(sk);
 
-	if (len != sizeof(struct sctp_sndrcvinfo))
+	if (len < sizeof(struct sctp_sndrcvinfo))
 		return -EINVAL;
-	if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo)))
+
+	len = sizeof(struct sctp_sndrcvinfo);
+
+	if (copy_from_user(&info, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
@@ -4475,7 +4507,9 @@
 		info.sinfo_timetolive = sp->default_timetolive;
 	}
 
-	if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo)))
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &info, len))
 		return -EFAULT;
 
 	return 0;
@@ -4526,10 +4560,12 @@
 	struct sctp_rtoinfo rtoinfo;
 	struct sctp_association *asoc;
 
-	if (len != sizeof (struct sctp_rtoinfo))
+	if (len < sizeof (struct sctp_rtoinfo))
 		return -EINVAL;
 
-	if (copy_from_user(&rtoinfo, optval, sizeof (struct sctp_rtoinfo)))
+	len = sizeof(struct sctp_rtoinfo);
+
+	if (copy_from_user(&rtoinfo, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id);
@@ -4581,11 +4617,12 @@
 	struct list_head *pos;
 	int cnt = 0;
 
-	if (len != sizeof (struct sctp_assocparams))
+	if (len < sizeof (struct sctp_assocparams))
 		return -EINVAL;
 
-	if (copy_from_user(&assocparams, optval,
-			sizeof (struct sctp_assocparams)))
+	len = sizeof(struct sctp_assocparams);
+
+	if (copy_from_user(&assocparams, optval, len))
 		return -EFAULT;
 
 	asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id);
@@ -4671,9 +4708,11 @@
 	struct sctp_sock *sp;
 	struct sctp_association *asoc;
 
-	if (len != sizeof(struct sctp_assoc_value))
+	if (len < sizeof(struct sctp_assoc_value))
 		return -EINVAL;
 
+	len = sizeof(struct sctp_assoc_value);
+
 	if (copy_from_user(&params, optval, len))
 		return -EFAULT;
 
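Note on the sctp_getsockopt_* hunks above, not part of the patch: the shared pattern is to reject only buffers that are too small (len < sizeof(...)), clamp len to the structure size when the buffer is oversized, copy exactly that much, and report the clamped length back through put_user(len, optlen). A minimal userspace sketch of that pattern, with memcpy() standing in for copy_to_user() and a plain store standing in for put_user(); struct opt_value and get_opt() are illustrative names:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct opt_value {
	int field_a;
	int field_b;
};

static int get_opt(const struct opt_value *src, void *optval, int *optlen)
{
	int len = *optlen;

	if (len < (int)sizeof(struct opt_value))
		return -EINVAL;		/* too-small buffer is still an error */

	len = sizeof(struct opt_value);	/* oversized buffer: clamp, don't reject */
	memcpy(optval, src, len);	/* kernel code: copy_to_user() */
	*optlen = len;			/* kernel code: put_user(len, optlen) */
	return 0;
}

int main(void)
{
	struct opt_value v = { 1, 2 };
	char big_buf[64];		/* deliberately larger than the structure */
	int len = sizeof(big_buf);

	printf("ret=%d len=%d\n", get_opt(&v, big_buf, &len), len);
	return 0;
}
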
@@ -6084,8 +6123,11 @@
 	 * queued to the backlog.  This prevents a potential race between
 	 * backlog processing on the old socket and new-packet processing
 	 * on the new socket.
+	 *
+	 * The caller has just allocated newsk so we can guarantee that other
+	 * paths won't try to lock it and then oldsk.
 	 */
-	sctp_lock_sock(newsk);
+	lock_sock_nested(newsk, SINGLE_DEPTH_NESTING);
 	sctp_assoc_migrate(assoc, newsk);
 
 	/* If the association on the newsk is already closed before accept()
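
Note on the lock_sock_nested() hunk above, not part of the patch: the comment's argument is that newsk was only just allocated by the caller and is not yet visible to any other path, so nobody can hold newsk's lock while waiting for oldsk's, and the oldsk -> newsk nesting cannot deadlock; SINGLE_DEPTH_NESTING simply tells lockdep that this one level of nesting is deliberate. A minimal userspace analogue of that argument using pthread mutexes; fake_sock, migrate() and the other names are illustrative (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_sock {
	pthread_mutex_t lock;
	int assoc;
};

static struct fake_sock *fake_sock_alloc(void)
{
	struct fake_sock *sk = calloc(1, sizeof(*sk));

	pthread_mutex_init(&sk->lock, NULL);
	return sk;			/* not yet visible to any other thread */
}

static void migrate(struct fake_sock *oldsk, struct fake_sock *newsk)
{
	pthread_mutex_lock(&oldsk->lock);
	/* Safe nesting: newsk is still private to this thread, so no one can
	 * be holding newsk->lock while waiting for oldsk->lock. */
	pthread_mutex_lock(&newsk->lock);
	newsk->assoc = oldsk->assoc;
	oldsk->assoc = 0;
	pthread_mutex_unlock(&newsk->lock);
	pthread_mutex_unlock(&oldsk->lock);
}

int main(void)
{
	struct fake_sock *oldsk = fake_sock_alloc();
	struct fake_sock *newsk = fake_sock_alloc();

	oldsk->assoc = 42;
	migrate(oldsk, newsk);
	printf("migrated assoc=%d\n", newsk->assoc);
	free(oldsk);
	free(newsk);
	return 0;
}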