Merge branches 'cma', 'ipoib', 'iser', 'mlx4' and 'nes' into for-next
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 26b3760..1983adc 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2648,8 +2648,8 @@
 	req.responder_resources = conn_param->responder_resources;
 	req.initiator_depth = conn_param->initiator_depth;
 	req.flow_control = conn_param->flow_control;
-	req.retry_count = conn_param->retry_count;
-	req.rnr_retry_count = conn_param->rnr_retry_count;
+	req.retry_count = min_t(u8, 7, conn_param->retry_count);
+	req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
 	req.max_cm_retries = CMA_MAX_CM_RETRIES;
@@ -2770,7 +2770,7 @@
 	rep.initiator_depth = conn_param->initiator_depth;
 	rep.failover_accepted = 0;
 	rep.flow_control = conn_param->flow_control;
-	rep.rnr_retry_count = conn_param->rnr_retry_count;
+	rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
 	rep.srq = id_priv->srq ? 1 : 0;
 
 	ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
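
The two hunks above clamp the retry counts because the IB CM REQ and REP wire formats carry retry_count and rnr_retry_count in 3-bit fields, so anything above 7 cannot be represented and would be truncated on the wire. A minimal standalone sketch of the difference, not kernel code; clamp3() is a hypothetical stand-in for min_t(u8, 7, v):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for min_t(u8, 7, v): saturate to 3 bits. */
static uint8_t clamp3(uint8_t v)
{
	return v > 7 ? 7 : v;
}

int main(void)
{
	uint8_t requested = 10;

	/* A 3-bit wire field silently keeps only the low bits:
	 * 10 & 0x7 == 2, fewer retries than the caller asked for.
	 * Clamping saturates to the maximum instead. */
	printf("truncated: %u, clamped: %u\n",
	       (unsigned)(requested & 0x7), (unsigned)clamp3(requested));
	return 0;
}

Saturating beats truncating here: a caller asking for 10 retries gets 7, not 2.
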
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 196eb52..07ca6fd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -535,14 +535,14 @@
 void ipoib_set_ethtool_ops(struct net_device *dev);
 int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca);
 
-#ifdef CONFIG_INFINIBAND_IPOIB_CM
-
 #define IPOIB_FLAGS_RC		0x80
 #define IPOIB_FLAGS_UC		0x40
 
 /* We don't support UC connections at the moment */
 #define IPOIB_CM_SUPPORTED(ha)   (ha[0] & (IPOIB_FLAGS_RC))
 
+#ifdef CONFIG_INFINIBAND_IPOIB_CM
+
 extern int ipoib_max_conn_qp;
 
 static inline int ipoib_cm_admin_enabled(struct net_device *dev)
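
Note the reorder above: the address-flag macros and IPOIB_CM_SUPPORTED() move out of the CONFIG_INFINIBAND_IPOIB_CM guard so that code compiled unconditionally, such as ipoib_set_mode() relocated to ipoib_main.c below, can still test them when CM support is configured out. A standalone sketch of the resulting pattern; the address bytes are made up:

#include <stdio.h>

/* Mirrors the header: the flag test lives outside any config guard. */
#define IPOIB_FLAGS_RC		0x80
#define IPOIB_CM_SUPPORTED(ha)	((ha)[0] & (IPOIB_FLAGS_RC))

int main(void)
{
	unsigned char ha_rc[]  = { 0x80 };	/* RC-capable address byte */
	unsigned char ha_dgm[] = { 0x00 };	/* datagram-only address byte */

	printf("rc-capable: %d, datagram-only: %d\n",
	       !!IPOIB_CM_SUPPORTED(ha_rc), !!IPOIB_CM_SUPPORTED(ha_dgm));
	return 0;
}
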
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 175581c..72ae63f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1448,37 +1448,6 @@
 		return sprintf(buf, "datagram\n");
 }
 
-int ipoib_set_mode(struct net_device *dev, const char *buf)
-{
-	struct ipoib_dev_priv *priv = netdev_priv(dev);
-
-	/* flush paths if we switch modes so that connections are restarted */
-	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
-		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		ipoib_warn(priv, "enabling connected mode "
-			   "will cause multicast packet drops\n");
-		netdev_update_features(dev);
-		rtnl_unlock();
-		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	if (!strcmp(buf, "datagram\n")) {
-		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
-		netdev_update_features(dev);
-		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
-		rtnl_unlock();
-		ipoib_flush_paths(dev);
-		rtnl_lock();
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
 static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 			const char *buf, size_t count)
 {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index d576c7a..6fdc9e7 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -215,6 +215,37 @@
 	return 0;
 }
 
+int ipoib_set_mode(struct net_device *dev, const char *buf)
+{
+	struct ipoib_dev_priv *priv = netdev_priv(dev);
+
+	/* flush paths if we switch modes so that connections are restarted */
+	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
+		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		ipoib_warn(priv, "enabling connected mode "
+			   "will cause multicast packet drops\n");
+		netdev_update_features(dev);
+		rtnl_unlock();
+		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;
+
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	if (!strcmp(buf, "datagram\n")) {
+		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
+		netdev_update_features(dev);
+		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
+		rtnl_unlock();
+		ipoib_flush_paths(dev);
+		rtnl_lock();
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static struct ipoib_path *__path_find(struct net_device *dev, void *gid)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
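
ipoib_set_mode() moves verbatim from ipoib_cm.c into ipoib_main.c so it is built even when CONFIG_INFINIBAND_IPOIB_CM is disabled. The rtnl_unlock()/rtnl_lock() pair around ipoib_flush_paths() exists because the sysfs store handler enters with the RTNL held, while the flush can sleep on work that itself needs the RTNL. A userspace analogy of that lock-drop pattern; the pthread names are stand-ins, not the kernel API:

/* build: cc -pthread lockdrop.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for ipoib_flush_paths(): may block on workers that
 * themselves need cfg_lock, so it must run with the lock dropped. */
static void flush_paths(void)
{
	puts("flushing paths");
}

/* Called with cfg_lock held, like ipoib_set_mode() under the RTNL. */
static int set_mode(void)
{
	pthread_mutex_unlock(&cfg_lock);
	flush_paths();
	pthread_mutex_lock(&cfg_lock);	/* restore the caller's locking state */
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&cfg_lock);
	set_mode();
	pthread_mutex_unlock(&cfg_lock);
	return 0;
}
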
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 296be43..ef7d3be 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -177,6 +177,7 @@
 
 /* fwd declarations */
 struct iser_device;
+struct iser_cq_desc;
 struct iscsi_iser_conn;
 struct iscsi_iser_task;
 struct iscsi_endpoint;
@@ -226,16 +227,21 @@
 	char		             pad[ISER_RX_PAD_SIZE];
 } __attribute__((packed));
 
+#define ISER_MAX_CQ 4
+
 struct iser_device {
 	struct ib_device             *ib_device;
 	struct ib_pd	             *pd;
-	struct ib_cq	             *rx_cq;
-	struct ib_cq	             *tx_cq;
+	struct ib_cq	             *rx_cq[ISER_MAX_CQ];
+	struct ib_cq	             *tx_cq[ISER_MAX_CQ];
 	struct ib_mr	             *mr;
-	struct tasklet_struct	     cq_tasklet;
+	struct tasklet_struct	     cq_tasklet[ISER_MAX_CQ];
 	struct ib_event_handler      event_handler;
 	struct list_head             ig_list; /* entry in ig devices list */
 	int                          refcount;
+	int                          cq_active_qps[ISER_MAX_CQ];
+	int			     cqs_used;
+	struct iser_cq_desc	     *cq_desc;
 };
 
 struct iser_conn {
@@ -287,6 +293,11 @@
 	int data_size;
 };
 
+struct iser_cq_desc {
+	struct iser_device           *device;
+	int                          cq_index;
+};
+
 struct iser_global {
 	struct mutex      device_list_mutex;/* protects device_list */
 	struct list_head  device_list;	     /* all iSER devices */
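
struct iser_cq_desc is the new per-CQ context: with several CQs per device, the bare iser_device pointer previously passed to ib_create_cq() can no longer identify which CQ fired, so each CQ instead carries a descriptor holding the device back-pointer plus its index. A standalone sketch of the pattern; the types are simplified stand-ins for the verbs objects:

#include <stdio.h>

struct dev { const char *name; };

/* Mirrors struct iser_cq_desc: device back-pointer plus CQ index. */
struct cq_desc {
	struct dev *device;
	int         cq_index;
};

/* One callback body serves every CQ; the descriptor tells them apart. */
static void cq_callback(void *cq_context)
{
	struct cq_desc *d = cq_context;

	printf("completion on %s, cq %d\n", d->device->name, d->cq_index);
}

int main(void)
{
	struct dev dev = { "ibdev0" };
	struct cq_desc desc[4];
	int i;

	for (i = 0; i < 4; i++) {
		desc[i].device = &dev;
		desc[i].cq_index = i;
	}
	cq_callback(&desc[2]);	/* as if CQ 2 fired */
	return 0;
}
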
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2dddabd..95a49af 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -70,32 +70,50 @@
  */
 static int iser_create_device_ib_res(struct iser_device *device)
 {
+	int i, j;
+	struct iser_cq_desc *cq_desc;
+
+	device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
+	iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used,
+		 device->ib_device->name, device->ib_device->num_comp_vectors);
+
+	device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used,
+				  GFP_KERNEL);
+	if (device->cq_desc == NULL)
+		goto cq_desc_err;
+	cq_desc = device->cq_desc;
+
 	device->pd = ib_alloc_pd(device->ib_device);
 	if (IS_ERR(device->pd))
 		goto pd_err;
 
-	device->rx_cq = ib_create_cq(device->ib_device,
-				  iser_cq_callback,
-				  iser_cq_event_callback,
-				  (void *)device,
-				  ISER_MAX_RX_CQ_LEN, 0);
-	if (IS_ERR(device->rx_cq))
-		goto rx_cq_err;
+	for (i = 0; i < device->cqs_used; i++) {
+		cq_desc[i].device   = device;
+		cq_desc[i].cq_index = i;
 
-	device->tx_cq = ib_create_cq(device->ib_device,
-				  NULL, iser_cq_event_callback,
-				  (void *)device,
-				  ISER_MAX_TX_CQ_LEN, 0);
+		device->rx_cq[i] = ib_create_cq(device->ib_device,
+					  iser_cq_callback,
+					  iser_cq_event_callback,
+					  (void *)&cq_desc[i],
+					  ISER_MAX_RX_CQ_LEN, i);
+		if (IS_ERR(device->rx_cq[i]))
+			goto cq_err;
 
-	if (IS_ERR(device->tx_cq))
-		goto tx_cq_err;
+		device->tx_cq[i] = ib_create_cq(device->ib_device,
+					  NULL, iser_cq_event_callback,
+					  (void *)&cq_desc[i],
+					  ISER_MAX_TX_CQ_LEN, i);
 
-	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
-		goto cq_arm_err;
+		if (IS_ERR(device->tx_cq[i]))
+			goto cq_err;
 
-	tasklet_init(&device->cq_tasklet,
-		     iser_cq_tasklet_fn,
-		     (unsigned long)device);
+		if (ib_req_notify_cq(device->rx_cq[i], IB_CQ_NEXT_COMP))
+			goto cq_err;
+
+		tasklet_init(&device->cq_tasklet[i],
+			     iser_cq_tasklet_fn,
+			     (unsigned long)&cq_desc[i]);
+	}
 
 	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
 				   IB_ACCESS_REMOTE_WRITE |
@@ -113,14 +131,19 @@
 handler_err:
 	ib_dereg_mr(device->mr);
 dma_mr_err:
-	tasklet_kill(&device->cq_tasklet);
-cq_arm_err:
-	ib_destroy_cq(device->tx_cq);
-tx_cq_err:
-	ib_destroy_cq(device->rx_cq);
-rx_cq_err:
+	for (j = 0; j < device->cqs_used; j++)
+		tasklet_kill(&device->cq_tasklet[j]);
+cq_err:
+	for (j = 0; j <= i && j < device->cqs_used; j++) {
+		if (device->tx_cq[j] && !IS_ERR(device->tx_cq[j]))
+			ib_destroy_cq(device->tx_cq[j]);
+		if (device->rx_cq[j] && !IS_ERR(device->rx_cq[j]))
+			ib_destroy_cq(device->rx_cq[j]);
+	}
 	ib_dealloc_pd(device->pd);
 pd_err:
+	kfree(device->cq_desc);
+cq_desc_err:
 	iser_err("failed to allocate an IB resource\n");
 	return -1;
 }
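
The cq_err unwind above walks the partially-built CQ arrays and destroys only entries that were fully created; ib_create_cq() returns ERR_PTR() values rather than NULL on failure, hence the IS_ERR() checks, while untouched slots stay NULL from the zeroed allocation. A standalone sketch of the same unwind shape, with malloc/free standing in for ib_create_cq/ib_destroy_cq and a plain NULL signalling failure:

#include <stdio.h>
#include <stdlib.h>

#define MAX_RES 4

/* Stand-in for ib_create_cq(); fails at index 2 to exercise the unwind. */
static void *create_res(int i)
{
	return i == 2 ? NULL : malloc(1);
}

int main(void)
{
	void *res[MAX_RES] = { 0 };
	int i, j;

	for (i = 0; i < MAX_RES; i++) {
		res[i] = create_res(i);
		if (!res[i])
			goto err;
	}
	return 0;
err:
	for (j = 0; j < i; j++)
		free(res[j]);	/* destroy only the fully-created entries */
	fprintf(stderr, "creation failed at index %d\n", i);
	return 1;
}
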
@@ -131,18 +154,24 @@
  */
 static void iser_free_device_ib_res(struct iser_device *device)
 {
+	int i;
 	BUG_ON(device->mr == NULL);
 
-	tasklet_kill(&device->cq_tasklet);
+	for (i = 0; i < device->cqs_used; i++) {
+		tasklet_kill(&device->cq_tasklet[i]);
+		(void)ib_destroy_cq(device->tx_cq[i]);
+		(void)ib_destroy_cq(device->rx_cq[i]);
+		device->tx_cq[i] = NULL;
+		device->rx_cq[i] = NULL;
+	}
+
 	(void)ib_unregister_event_handler(&device->event_handler);
 	(void)ib_dereg_mr(device->mr);
-	(void)ib_destroy_cq(device->tx_cq);
-	(void)ib_destroy_cq(device->rx_cq);
 	(void)ib_dealloc_pd(device->pd);
 
+	kfree(device->cq_desc);
+
 	device->mr = NULL;
-	device->tx_cq = NULL;
-	device->rx_cq = NULL;
 	device->pd = NULL;
 }
 
@@ -157,6 +186,7 @@
 	struct ib_qp_init_attr	init_attr;
 	int			req_err, resp_err, ret = -ENOMEM;
 	struct ib_fmr_pool_param params;
+	int index, min_index = 0;
 
 	BUG_ON(ib_conn->device == NULL);
 
@@ -220,10 +250,20 @@
 
 	memset(&init_attr, 0, sizeof init_attr);
 
+	mutex_lock(&ig.connlist_mutex);
+	/* select the CQ with the fewest active QPs */
+	for (index = 0; index < device->cqs_used; index++)
+		if (device->cq_active_qps[index] <
+		    device->cq_active_qps[min_index])
+			min_index = index;
+	device->cq_active_qps[min_index]++;
+	mutex_unlock(&ig.connlist_mutex);
+	iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn);
+
 	init_attr.event_handler = iser_qp_event_callback;
 	init_attr.qp_context	= (void *)ib_conn;
-	init_attr.send_cq	= device->tx_cq;
-	init_attr.recv_cq	= device->rx_cq;
+	init_attr.send_cq	= device->tx_cq[min_index];
+	init_attr.recv_cq	= device->rx_cq[min_index];
 	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
 	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
 	init_attr.cap.max_send_sge = 2;
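
QP placement above is a least-loaded scan: take the CQ with the fewest active QPs and charge the new QP to it, all under ig.connlist_mutex so concurrent connection setups see consistent counters. The scan in isolation; the counter values are made up:

#include <stdio.h>

#define NCQ 4

int main(void)
{
	int cq_active_qps[NCQ] = { 3, 1, 2, 1 };	/* made-up load */
	int index, min_index = 0;

	for (index = 0; index < NCQ; index++)
		if (cq_active_qps[index] < cq_active_qps[min_index])
			min_index = index;
	cq_active_qps[min_index]++;	/* charge the new QP to this CQ */

	printf("selected cq %d\n", min_index);	/* first minimum wins: 1 */
	return 0;
}
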
@@ -252,6 +292,7 @@
  */
 static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
 {
+	int cq_index;
 	BUG_ON(ib_conn == NULL);
 
 	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
@@ -262,9 +303,12 @@
 	if (ib_conn->fmr_pool != NULL)
 		ib_destroy_fmr_pool(ib_conn->fmr_pool);
 
-	if (ib_conn->qp != NULL)
-		rdma_destroy_qp(ib_conn->cma_id);
+	if (ib_conn->qp != NULL) {
+		cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
+		ib_conn->device->cq_active_qps[cq_index]--;
 
+		rdma_destroy_qp(ib_conn->cma_id);
+	}
 	/* if in cma handler context, the caller ensures that the cma destroys the id */
 	if (ib_conn->cma_id != NULL && can_destroy_id)
 		rdma_destroy_id(ib_conn->cma_id);
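
The teardown above must return the QP's slot on its CQ, but struct iser_conn never recorded which CQ the QP landed on; the index is instead recovered from the iser_cq_desc that was installed as the recv CQ's context at creation time. A standalone sketch of that back-pointer chase; the struct names are simplified stand-ins for the verbs objects:

#include <stdio.h>

struct cq_desc { int cq_index; };
struct cq      { void *cq_context; };
struct qp      { struct cq *recv_cq; };

int main(void)
{
	struct cq_desc desc = { .cq_index = 2 };
	struct cq      rx   = { .cq_context = &desc };
	struct qp      qp   = { .recv_cq = &rx };
	int cq_active_qps[4] = { 1, 1, 1, 1 };
	int cq_index;

	/* same chase as above: qp -> recv cq -> context -> index */
	cq_index = ((struct cq_desc *)qp.recv_cq->cq_context)->cq_index;
	cq_active_qps[cq_index]--;	/* release the slot taken at create time */

	printf("released cq %d, %d qps left on it\n",
	       cq_index, cq_active_qps[cq_index]);
	return 0;
}
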
@@ -791,9 +835,9 @@
 	}
 }
 
-static int iser_drain_tx_cq(struct iser_device  *device)
+static int iser_drain_tx_cq(struct iser_device  *device, int cq_index)
 {
-	struct ib_cq  *cq = device->tx_cq;
+	struct ib_cq  *cq = device->tx_cq[cq_index];
 	struct ib_wc  wc;
 	struct iser_tx_desc *tx_desc;
 	struct iser_conn *ib_conn;
@@ -822,8 +866,10 @@
 
 static void iser_cq_tasklet_fn(unsigned long data)
 {
-	 struct iser_device  *device = (struct iser_device *)data;
-	 struct ib_cq	     *cq = device->rx_cq;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)data;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
+	struct ib_cq	     *cq = device->rx_cq[cq_index];
 	 struct ib_wc	     wc;
 	 struct iser_rx_desc *desc;
 	 unsigned long	     xfer_len;
@@ -851,19 +897,21 @@
 		}
 		completed_rx++;
 		if (!(completed_rx & 63))
-			completed_tx += iser_drain_tx_cq(device);
+			completed_tx += iser_drain_tx_cq(device, cq_index);
 	}
 	/* #warning "it is assumed here that arming the CQ only once it's empty" *
 	 * " would not cause interrupts to be missed"                       */
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 
-	completed_tx += iser_drain_tx_cq(device);
+	completed_tx += iser_drain_tx_cq(device, cq_index);
 	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
 }
 
 static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
 {
-	struct iser_device  *device = (struct iser_device *)cq_context;
+	struct iser_cq_desc *cq_desc = (struct iser_cq_desc *)cq_context;
+	struct iser_device  *device = cq_desc->device;
+	int cq_index = cq_desc->cq_index;
 
-	tasklet_schedule(&device->cq_tasklet);
+	tasklet_schedule(&device->cq_tasklet[cq_index]);
 }
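
The tasklet keeps the pre-existing drain/re-arm shape, now parameterized by CQ index: drain the CQ, re-arm the notification, then drain once more so a completion that raced the re-arm is handled now rather than waiting for the next interrupt (the #warning comment above records the same assumption). A toy model of that ordering; poll_one() and rearm() are stand-ins for ib_poll_cq() and ib_req_notify_cq():

#include <stdio.h>

static int queue_len = 5;	/* pretend completions are queued */

/* Stand-in for ib_poll_cq(): returns 1 while entries remain. */
static int poll_one(void)
{
	if (queue_len > 0) {
		queue_len--;
		return 1;
	}
	return 0;
}

/* Stand-in for ib_req_notify_cq(); simulate a completion racing it. */
static void rearm(void)
{
	queue_len++;
}

int main(void)
{
	int completed = 0;

	while (poll_one())	/* first drain */
		completed++;
	rearm();
	while (poll_one())	/* catch anything that raced the re-arm */
		completed++;

	printf("handled %d completions\n", completed);
	return 0;
}
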
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index ba6506f..926c911 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3094,6 +3094,8 @@
 		if (validate_eth_header_mac(slave, rule_header, rlist))
 			return -EINVAL;
 		break;
+	case MLX4_NET_TRANS_RULE_ID_IB:
+		break;
 	case MLX4_NET_TRANS_RULE_ID_IPV4:
 	case MLX4_NET_TRANS_RULE_ID_TCP:
 	case MLX4_NET_TRANS_RULE_ID_UDP:
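
The new MLX4_NET_TRANS_RULE_ID_IB case lets IB flow-steering rules from slave functions pass the resource tracker's per-type validation: IB rules need no extra header checks, so the case breaks out immediately instead of reaching the rejection path. The dispatch shape in isolation; the enum values and -EINVAL default are illustrative, not copied from the driver:

#include <stdio.h>
#include <errno.h>

enum rule_id { RULE_ID_ETH, RULE_ID_IB, RULE_ID_IPV4 };

static int validate_rule(enum rule_id id)
{
	switch (id) {
	case RULE_ID_ETH:
		/* MAC-ownership checks would run here */
		break;
	case RULE_ID_IB:
		break;			/* allowed, nothing extra to check */
	case RULE_ID_IPV4:
		/* IP-header checks would run here */
		break;
	default:
		return -EINVAL;		/* unknown rule types are rejected */
	}
	return 0;
}

int main(void)
{
	printf("ib rule: %d\n", validate_rule(RULE_ID_IB));
	return 0;
}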