Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/cooloney/blackfin-2.6

* 'master' of master.kernel.org:/pub/scm/linux/kernel/git/cooloney/blackfin-2.6:
  Blackfin arch: add proper const volatile to addr argument to the read functions
  Blackfin arch: Add definition of dma_mapping_error
  Blackfin arch: move cond_syscall() behind __KERNEL__ like all other architectures
  Blackfin arch: match kernel startup message with new linker script
  Blackfin arch: add missing braces around bfin serial init array
  Blackfin arch: update printk to use KERN_EMERG and reformat crash output
  Blackfin arch: update ANOMALY handling
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index b4aec51..d40652a 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -225,13 +225,15 @@
 	 * up here and not be able to take the mmap_sem.  In that case
 	 * we defer the vm_locked accounting to the system workqueue.
 	 */
-	if (context->closing && !down_write_trylock(&mm->mmap_sem)) {
-		INIT_WORK(&umem->work, ib_umem_account);
-		umem->mm   = mm;
-		umem->diff = diff;
+	if (context->closing) {
+		if (!down_write_trylock(&mm->mmap_sem)) {
+			INIT_WORK(&umem->work, ib_umem_account);
+			umem->mm   = mm;
+			umem->diff = diff;
 
-		schedule_work(&umem->work);
-		return;
+			schedule_work(&umem->work);
+			return;
+		}
 	} else
 		down_write(&mm->mmap_sem);
 
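Note on the hunk above: in the old code, when context->closing was set and
down_write_trylock() succeeded, the combined condition evaluated false, so the
else branch called down_write() on a semaphore the trylock had already taken.
Splitting the test lets the successful-trylock path fall through holding the
lock, and only the contended case is deferred to the workqueue. A minimal
sketch of the resulting pattern (acct_work and acct_func are hypothetical
names, not from this patch):

    #include <linux/workqueue.h>
    #include <linux/rwsem.h>
    #include <linux/slab.h>

    struct acct_work {
        struct work_struct work;
        struct rw_semaphore *sem;
    };

    static void acct_func(struct work_struct *work)
    {
        struct acct_work *aw = container_of(work, struct acct_work, work);

        down_write(aw->sem);    /* may sleep: workqueue context */
        /* ... deferred accounting under the lock ... */
        up_write(aw->sem);
        kfree(aw);
    }

    /* caller, with aw allocated up front: */
    if (closing) {
        if (!down_write_trylock(sem)) {
            INIT_WORK(&aw->work, acct_func);
            schedule_work(&aw->work);   /* account later */
            return;
        }
        /* trylock succeeded: fall through holding sem */
    } else
        down_write(sem);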
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 1095c82..c591616 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -120,7 +120,7 @@
 	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
 	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
 	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
-	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes;
+	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
 	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
 	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
 	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
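
Note on the hunk above: reporting max_srq_wr as max_srq_wqes - 1 matches the
usual ring convention in which one WQE is kept permanently unusable so that a
completely full queue can be told apart from an empty one. That rationale is
an assumption here; the patch itself only records the subtraction.

    /* ring of N WQEs, one reserved (assumed convention):
     * empty: head == tail; full: head - tail == N - 1,
     * so at most N - 1 work requests can ever be outstanding */
    props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;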
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 076a0bb..5ffc464 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -56,13 +56,6 @@
 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
 
-struct ipoib_cm_id {
-	struct ib_cm_id *id;
-	int flags;
-	u32 remote_qpn;
-	u32 remote_mtu;
-};
-
 static struct ib_qp_attr ipoib_cm_err_attr = {
 	.qp_state = IB_QPS_ERR
 };
@@ -309,6 +302,11 @@
 		return -ENOMEM;
 	p->dev = dev;
 	p->id = cm_id;
+	cm_id->context = p;
+	p->state = IPOIB_CM_RX_LIVE;
+	p->jiffies = jiffies;
+	INIT_LIST_HEAD(&p->list);
+
 	p->qp = ipoib_cm_create_rx_qp(dev, p);
 	if (IS_ERR(p->qp)) {
 		ret = PTR_ERR(p->qp);
@@ -320,24 +318,24 @@
 	if (ret)
 		goto err_modify;
 
+	spin_lock_irq(&priv->lock);
+	queue_delayed_work(ipoib_workqueue,
+			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
+	/* Add this entry to the passive ids list head, but do not re-add it
+	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
+	p->jiffies = jiffies;
+	if (p->state == IPOIB_CM_RX_LIVE)
+		list_move(&p->list, &priv->cm.passive_ids);
+	spin_unlock_irq(&priv->lock);
+
 	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
 	if (ret) {
 		ipoib_warn(priv, "failed to send REP: %d\n", ret);
-		goto err_rep;
+		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
+			ipoib_warn(priv, "unable to move qp to error state\n");
 	}
-
-	cm_id->context = p;
-	p->jiffies = jiffies;
-	p->state = IPOIB_CM_RX_LIVE;
-	spin_lock_irq(&priv->lock);
-	if (list_empty(&priv->cm.passive_ids))
-		queue_delayed_work(ipoib_workqueue,
-				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
-	list_add(&p->list, &priv->cm.passive_ids);
-	spin_unlock_irq(&priv->lock);
 	return 0;
 
-err_rep:
 err_modify:
 	ib_destroy_qp(p->qp);
 err_qp:
@@ -754,9 +752,9 @@
 
 	p->mtu = be32_to_cpu(data->mtu);
 
-	if (p->mtu < priv->dev->mtu + IPOIB_ENCAP_LEN) {
-		ipoib_warn(priv, "Rejecting connection: mtu %d < device mtu %d + 4\n",
-			   p->mtu, priv->dev->mtu);
+	if (p->mtu <= IPOIB_ENCAP_LEN) {
+		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
+			   p->mtu, IPOIB_ENCAP_LEN);
 		return -EINVAL;
 	}
 
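Note on the hunks above: the receive structure is now fully initialized
(cm_id context, state, jiffies, list head) before the QP is created and
exposed to other contexts, and the later insertion uses list_move() under
priv->lock, guarded by p->state, so an IB_EVENT_QP_LAST_WQE_REACHED handler
that already moved the entry to the flush list is not undone. list_move()
unlinks the entry from wherever it currently is, which is why the
INIT_LIST_HEAD() call must come first. The last hunk also relaxes the MTU
check: only connections whose MTU cannot even cover the 4-byte IPoIB
encapsulation header are rejected. A sketch of the guarded re-queue
(struct entry and requeue_if_live are hypothetical names):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct entry {
        struct list_head list;  /* must be INIT_LIST_HEAD()ed early */
        int live;               /* cleared once the flush path claims it */
    };

    static void requeue_if_live(struct entry *e, struct list_head *live_list,
                                spinlock_t *lock)
    {
        spin_lock_irq(lock);
        if (e->live)
            list_move(&e->list, live_list); /* unlinks, then re-adds */
        spin_unlock_irq(lock);
    }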
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 7ea2d98..356f067 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -67,6 +67,11 @@
 	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
+struct ip_vs_sync_thread_data {
+	struct completion *startup;
+	int state;
+};
+
 #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE  \
@@ -751,6 +756,7 @@
 	mm_segment_t oldmm;
 	int state;
 	const char *name;
+	struct ip_vs_sync_thread_data *tinfo = startup;
 
 	/* increase the module use count */
 	ip_vs_use_count_inc();
@@ -789,7 +795,14 @@
 	add_wait_queue(&sync_wait, &wait);
 
 	set_sync_pid(state, current->pid);
-	complete((struct completion *)startup);
+	complete(tinfo->startup);
+
+	/*
+	 * Once we signal the startup completion above, we must
+	 * NULL out that reference, since it's allocated on the
+	 * stack of the creating kernel thread.
+	 */
+	tinfo->startup = NULL;
 
 	/* processing master/backup loop here */
 	if (state == IP_VS_STATE_MASTER)
@@ -801,6 +814,14 @@
 	remove_wait_queue(&sync_wait, &wait);
 
 	/* thread exits */
+
+	/*
+	 * If we weren't explicitly stopped, then we exited
+	 * in error and should undo our state.
+	 */
+	if ((!stop_master_sync) && (!stop_backup_sync))
+		ip_vs_sync_state -= tinfo->state;
+
 	set_sync_pid(state, 0);
 	IP_VS_INFO("sync thread stopped!\n");
 
@@ -812,6 +833,11 @@
 	set_stop_sync(state, 0);
 	wake_up(&stop_sync_wait);
 
+	/*
+	 * we need to free the structure that was allocated
+	 * for us in start_sync_thread
+	 */
+	kfree(tinfo);
 	return 0;
 }
 
@@ -838,11 +864,19 @@
 {
 	DECLARE_COMPLETION_ONSTACK(startup);
 	pid_t pid;
+	struct ip_vs_sync_thread_data *tinfo;
 
 	if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
 	    (state == IP_VS_STATE_BACKUP && sync_backup_pid))
 		return -EEXIST;
 
+	/*
+	 * Note that tinfo will be freed in sync_thread on exit
+	 */
+	tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
+	if (!tinfo)
+		return -ENOMEM;
+
 	IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn));
@@ -858,8 +892,11 @@
 		ip_vs_backup_syncid = syncid;
 	}
 
+	tinfo->state = state;
+	tinfo->startup = &startup;
+
   repeat:
-	if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
+	if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
 		IP_VS_ERR("could not create fork_sync_thread due to %d... "
 			  "retrying.\n", pid);
 		msleep_interruptible(1000);
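
Note on the hunks above: the startup completion lives on start_sync_thread()'s
stack, so the spawned thread may signal it exactly once and must then drop the
pointer, while everything the thread needs afterwards (tinfo) has to live on
the heap and be freed by the thread itself. A condensed sketch of that handoff
(worker, start_worker and thread_data are hypothetical names):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    struct thread_data {
        struct completion *startup; /* valid only until completed */
        int state;
    };

    static int worker(void *arg)
    {
        struct thread_data *td = arg;

        complete(td->startup);
        td->startup = NULL;     /* creator's stack frame may now vanish */

        /* ... main loop, free to keep using td->state ... */

        kfree(td);              /* the thread owns td */
        return 0;
    }

    static int start_worker(int state)
    {
        DECLARE_COMPLETION_ONSTACK(startup);
        struct thread_data *td = kmalloc(sizeof(*td), GFP_KERNEL);

        if (!td)
            return -ENOMEM;
        td->state = state;
        td->startup = &startup;
        if (kernel_thread(worker, td, 0) < 0) {
            kfree(td);
            return -ECHILD;
        }
        wait_for_completion(&startup);  /* keep startup alive until signaled */
        return 0;
    }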
diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
index 591c442..cc9102c 100644
--- a/net/rxrpc/ar-output.c
+++ b/net/rxrpc/ar-output.c
@@ -640,6 +640,7 @@
 			goto efault;
 		sp->remain -= copy;
 		skb->mark += copy;
+		copied += copy;
 
 		len -= copy;
 		segment -= copy;
@@ -709,6 +710,8 @@
 
 	} while (segment > 0);
 
+success:
+	ret = copied;
 out:
 	call->tx_pending = skb;
 	_leave(" = %d", ret);
@@ -725,7 +728,7 @@
 
 maybe_error:
 	if (copied)
-		ret = copied;
+		goto success;
 	goto out;
 
 efault:
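
Note on the hunks above: copied now accumulates the bytes queued per segment,
and both the normal exit and the partial-failure exit funnel through the new
success label, so a caller that managed to queue some data gets the byte count
back, write(2)-style, rather than the error. A sketch of the convention
(send_all and copy_one_segment are hypothetical names):

    #include <linux/errno.h>
    #include <linux/types.h>

    /* hypothetical helper: returns bytes consumed (> 0) or a negative errno */
    static int copy_one_segment(char *dst, const char *src, size_t len);

    static int send_all(char *dst, const char *src, size_t len)
    {
        int copied = 0;
        int ret;

        while (len > 0) {
            int n = copy_one_segment(dst, src, len);

            if (n < 0) {
                ret = n;
                goto maybe_error;
            }
            dst += n;
            src += n;
            len -= n;
            copied += n;
        }

    success:
        ret = copied;
    out:
        return ret;

    maybe_error:
        if (copied)
            goto success;   /* partial progress beats the errno */
        goto out;
    }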
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 85f3f43..dfacb9c 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1729,7 +1729,7 @@
 	    x->type && x->type->get_mtu)
 		res = x->type->get_mtu(x, mtu);
 	else
-		res = mtu;
+		res = mtu - x->props.header_len;
 	spin_unlock_bh(&x->lock);
 	return res;
 }
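
Note on the hunk above: when the transform type supplies no get_mtu() method,
the fallback must still subtract the bytes the transform itself prepends
(x->props.header_len); returning the raw MTU let callers build payloads that
no longer fit on the link once the xfrm header was added. Illustrative
arithmetic, with hypothetical numbers not taken from the patch:

    /* link MTU 1500, header_len 28 -> usable payload 1472 */
    res = mtu - x->props.header_len;    /* 1500 - 28 = 1472 */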