qed*: RSS indirection based on queue-handles

A step toward making qede agnostic to the queue configuration used by
firmware/hardware: have the RSS indirection table reference queue
handles instead of actual queue indices.
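
For illustration, the conversion amounts to filling each indirection
entry with the opaque handle qed returned when the corresponding Rx
queue was started, instead of the queue index itself; the vport-update
parameters carrying that table are also moved off the stack into
vzalloc()ed memory in the hunks below. A minimal sketch of the idea
follows (the qede_fill_rss_params() helper is visible in the diff, but
its body, the struct qed_update_vport_rss_params layout and the
fp_array/rxq->handle accessors used here are illustrative assumptions,
not lifted from this section):

	/* Sketch only: translate the logical Rx-queue indices the driver
	 * caches (edev->rss_params.rss_ind_table before this patch) into
	 * the opaque per-queue handles qed handed back at Rx-queue start,
	 * so qed can resolve them regardless of the FW/HW queue layout.
	 */
	static void qede_fill_rss_ind_table_sketch(struct qede_dev *edev,
						   struct qed_update_vport_rss_params *rss,
						   const u16 *ind)
	{
		int i;

		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
			/* Previously the entry carried the index itself:
			 *	rss->rss_ind_table[i] = ind[i];
			 */
			rss->rss_ind_table[i] = edev->fp_array[ind[i]].rxq->handle;
		}
	}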

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index be4121c..88d47d6 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -59,6 +59,7 @@
 #include <linux/random.h>
 #include <net/ip6_checksum.h>
 #include <linux/bitops.h>
+#include <linux/vmalloc.h>
 #include <linux/qed/qede_roce.h>
 #include "qede.h"
 
@@ -177,8 +178,12 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 {
 	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
 	struct qed_dev_info *qed_info = &edev->dev_info.common;
+	struct qed_update_vport_params *vport_params;
 	int rc;
 
+	vport_params = vzalloc(sizeof(*vport_params));
+	if (!vport_params)
+		return -ENOMEM;
 	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
 
 	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
@@ -186,15 +191,13 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
 	/* Enable/Disable Tx switching for PF */
 	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
 	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
-		struct qed_update_vport_params params;
-
-		memset(&params, 0, sizeof(params));
-		params.vport_id = 0;
-		params.update_tx_switching_flg = 1;
-		params.tx_switching_flg = num_vfs_param ? 1 : 0;
-		edev->ops->vport_update(edev->cdev, &params);
+		vport_params->vport_id = 0;
+		vport_params->update_tx_switching_flg = 1;
+		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
+		edev->ops->vport_update(edev->cdev, vport_params);
 	}
 
+	vfree(vport_params);
 	return rc;
 }
 #endif
@@ -1504,19 +1507,24 @@ static int qede_stop_txq(struct qede_dev *edev,
 
 static int qede_stop_queues(struct qede_dev *edev)
 {
-	struct qed_update_vport_params vport_update_params;
+	struct qed_update_vport_params *vport_update_params;
 	struct qed_dev *cdev = edev->cdev;
 	struct qede_fastpath *fp;
 	int rc, i;
 
 	/* Disable the vport */
-	memset(&vport_update_params, 0, sizeof(vport_update_params));
-	vport_update_params.vport_id = 0;
-	vport_update_params.update_vport_active_flg = 1;
-	vport_update_params.vport_active_flg = 0;
-	vport_update_params.update_rss_flg = 0;
+	vport_update_params = vzalloc(sizeof(*vport_update_params));
+	if (!vport_update_params)
+		return -ENOMEM;
 
-	rc = edev->ops->vport_update(cdev, &vport_update_params);
+	vport_update_params->vport_id = 0;
+	vport_update_params->update_vport_active_flg = 1;
+	vport_update_params->vport_active_flg = 0;
+	vport_update_params->update_rss_flg = 0;
+
+	rc = edev->ops->vport_update(cdev, vport_update_params);
+	vfree(vport_update_params);
+
 	if (rc) {
 		DP_ERR(edev, "Failed to update vport\n");
 		return rc;
@@ -1628,11 +1636,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 {
 	int vlan_removal_en = 1;
 	struct qed_dev *cdev = edev->cdev;
-	struct qed_update_vport_params vport_update_params;
-	struct qed_queue_start_common_params q_params;
 	struct qed_dev_info *qed_info = &edev->dev_info.common;
+	struct qed_update_vport_params *vport_update_params;
+	struct qed_queue_start_common_params q_params;
 	struct qed_start_vport_params start = {0};
-	bool reset_rss_indir = false;
 	int rc, i;
 
 	if (!edev->num_queues) {
@@ -1641,6 +1648,10 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 		return -EINVAL;
 	}
 
+	vport_update_params = vzalloc(sizeof(*vport_update_params));
+	if (!vport_update_params)
+		return -ENOMEM;
+
 	start.gro_enable = !edev->gro_disable;
 	start.mtu = edev->ndev->mtu;
 	start.vport_id = 0;
@@ -1652,7 +1663,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 
 	if (rc) {
 		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
-		return rc;
+		goto out;
 	}
 
 	DP_VERBOSE(edev, NETIF_MSG_IFUP,
@@ -1688,7 +1699,7 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 			if (rc) {
 				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
 				       rc);
-				return rc;
+				goto out;
 			}
 
 			/* Use the return parameters */
@@ -1704,93 +1715,46 @@ static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
 		if (fp->type & QEDE_FASTPATH_XDP) {
 			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
 			if (rc)
-				return rc;
+				goto out;
 
 			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
 			if (IS_ERR(fp->rxq->xdp_prog)) {
 				rc = PTR_ERR(fp->rxq->xdp_prog);
 				fp->rxq->xdp_prog = NULL;
-				return rc;
+				goto out;
 			}
 		}
 
 		if (fp->type & QEDE_FASTPATH_TX) {
 			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
 			if (rc)
-				return rc;
+				goto out;
 		}
 	}
 
 	/* Prepare and send the vport enable */
-	memset(&vport_update_params, 0, sizeof(vport_update_params));
-	vport_update_params.vport_id = start.vport_id;
-	vport_update_params.update_vport_active_flg = 1;
-	vport_update_params.vport_active_flg = 1;
+	vport_update_params->vport_id = start.vport_id;
+	vport_update_params->update_vport_active_flg = 1;
+	vport_update_params->vport_active_flg = 1;
 
 	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
 	    qed_info->tx_switching) {
-		vport_update_params.update_tx_switching_flg = 1;
-		vport_update_params.tx_switching_flg = 1;
+		vport_update_params->update_tx_switching_flg = 1;
+		vport_update_params->tx_switching_flg = 1;
 	}
 
-	/* Fill struct with RSS params */
-	if (QEDE_RSS_COUNT(edev) > 1) {
-		vport_update_params.update_rss_flg = 1;
+	qede_fill_rss_params(edev, &vport_update_params->rss_params,
+			     &vport_update_params->update_rss_flg);
 
-		/* Need to validate current RSS config uses valid entries */
-		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-			if (edev->rss_params.rss_ind_table[i] >=
-			    QEDE_RSS_COUNT(edev)) {
-				reset_rss_indir = true;
-				break;
-			}
-		}
-
-		if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
-		    reset_rss_indir) {
-			u16 val;
-
-			for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
-				u16 indir_val;
-
-				val = QEDE_RSS_COUNT(edev);
-				indir_val = ethtool_rxfh_indir_default(i, val);
-				edev->rss_params.rss_ind_table[i] = indir_val;
-			}
-			edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
-		}
-
-		if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
-			netdev_rss_key_fill(edev->rss_params.rss_key,
-					    sizeof(edev->rss_params.rss_key));
-			edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
-		}
-
-		if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
-			edev->rss_params.rss_caps = QED_RSS_IPV4 |
-						    QED_RSS_IPV6 |
-						    QED_RSS_IPV4_TCP |
-						    QED_RSS_IPV6_TCP;
-			edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
-		}
-
-		memcpy(&vport_update_params.rss_params, &edev->rss_params,
-		       sizeof(vport_update_params.rss_params));
-	} else {
-		memset(&vport_update_params.rss_params, 0,
-		       sizeof(vport_update_params.rss_params));
-	}
-
-	rc = edev->ops->vport_update(cdev, &vport_update_params);
-	if (rc) {
+	rc = edev->ops->vport_update(cdev, vport_update_params);
+	if (rc)
 		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
-		return rc;
-	}
 
-	return 0;
+out:
+	vfree(vport_update_params);
+	return rc;
 }
 
-
 enum qede_unload_mode {
 	QEDE_UNLOAD_NORMAL,
 };