Yuval Mintze712d522015-10-26 11:02:27 +02001/* QLogic qede NIC Driver
2* Copyright (c) 2015 QLogic Corporation
3*
4* This software is available under the terms of the GNU General Public License
5* (GPL) Version 2, available from the file COPYING in the main directory of
6* this source tree.
7*/
8
9#include <linux/module.h>
10#include <linux/pci.h>
11#include <linux/version.h>
12#include <linux/device.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/skbuff.h>
16#include <linux/errno.h>
17#include <linux/list.h>
18#include <linux/string.h>
19#include <linux/dma-mapping.h>
20#include <linux/interrupt.h>
21#include <asm/byteorder.h>
22#include <asm/param.h>
23#include <linux/io.h>
24#include <linux/netdev_features.h>
25#include <linux/udp.h>
26#include <linux/tcp.h>
Alexander Duyckf9f082a2016-06-16 12:22:57 -070027#include <net/udp_tunnel.h>
Yuval Mintze712d522015-10-26 11:02:27 +020028#include <linux/ip.h>
29#include <net/ipv6.h>
30#include <net/tcp.h>
31#include <linux/if_ether.h>
32#include <linux/if_vlan.h>
33#include <linux/pkt_sched.h>
34#include <linux/ethtool.h>
35#include <linux/in.h>
36#include <linux/random.h>
37#include <net/ip6_checksum.h>
38#include <linux/bitops.h>
Ram Amranicee9fbd2016-10-01 21:59:56 +030039#include <linux/qed/qede_roce.h>
Yuval Mintze712d522015-10-26 11:02:27 +020040#include "qede.h"
41
Yuval Mintz5abd7e922016-02-24 16:52:50 +020042static char version[] =
43 "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";
Yuval Mintze712d522015-10-26 11:02:27 +020044
Yuval Mintz5abd7e922016-02-24 16:52:50 +020045MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
Yuval Mintze712d522015-10-26 11:02:27 +020046MODULE_LICENSE("GPL");
47MODULE_VERSION(DRV_MODULE_VERSION);
48
49static uint debug;
50module_param(debug, uint, 0);
51MODULE_PARM_DESC(debug, " Default debug msglevel");
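/* "debug" is a bitmask of message-type bits used as the default msglevel
 * (qede_cleanup() below, for instance, checks QED_LOG_INFO_MASK).
 * Illustrative usage only - the exact mask value is an assumption:
 *	modprobe qede debug=<mask of QED_MSG_xxx bits>
 */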
52
53static const struct qed_eth_ops *qed_ops;
54
55#define CHIP_NUM_57980S_40 0x1634
Yuval Mintz0e7441d2016-02-24 16:52:45 +020056#define CHIP_NUM_57980S_10 0x1666
Yuval Mintze712d522015-10-26 11:02:27 +020057#define CHIP_NUM_57980S_MF 0x1636
58#define CHIP_NUM_57980S_100 0x1644
59#define CHIP_NUM_57980S_50 0x1654
60#define CHIP_NUM_57980S_25 0x1656
Yuval Mintzfefb0202016-05-11 16:36:19 +030061#define CHIP_NUM_57980S_IOV 0x1664
Yuval Mintze712d522015-10-26 11:02:27 +020062
63#ifndef PCI_DEVICE_ID_NX2_57980E
64#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
65#define PCI_DEVICE_ID_57980S_10 CHIP_NUM_57980S_10
66#define PCI_DEVICE_ID_57980S_MF CHIP_NUM_57980S_MF
67#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
68#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
69#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
Yuval Mintzfefb0202016-05-11 16:36:19 +030070#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
Yuval Mintze712d522015-10-26 11:02:27 +020071#endif
72
Yuval Mintzfefb0202016-05-11 16:36:19 +030073enum qede_pci_private {
74 QEDE_PRIVATE_PF,
75 QEDE_PRIVATE_VF
76};
77
Yuval Mintze712d522015-10-26 11:02:27 +020078static const struct pci_device_id qede_pci_tbl[] = {
Yuval Mintzfefb0202016-05-11 16:36:19 +030079 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
80 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
81 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
82 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
83 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
84 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
Arnd Bergmann14b84e82016-06-01 15:29:13 +020085#ifdef CONFIG_QED_SRIOV
Yuval Mintzfefb0202016-05-11 16:36:19 +030086 {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
Arnd Bergmann14b84e82016-06-01 15:29:13 +020087#endif
Yuval Mintze712d522015-10-26 11:02:27 +020088 { 0 }
89};
90
91MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
92
93static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
94
95#define TX_TIMEOUT (5 * HZ)
96
97static void qede_remove(struct pci_dev *pdev);
Mintz, Yuval14d39642016-10-31 07:14:23 +020098static void qede_shutdown(struct pci_dev *pdev);
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +020099static void qede_link_update(void *dev, struct qed_link_output *link);
Yuval Mintze712d522015-10-26 11:02:27 +0200100
Mintz, Yuval567b3c12016-11-29 16:47:05 +0200101/* The qede lock is used to protect driver state changes and driver flows that
102 * are not reentrant.
103 */
104void __qede_lock(struct qede_dev *edev)
105{
106 mutex_lock(&edev->qede_lock);
107}
108
109void __qede_unlock(struct qede_dev *edev)
110{
111 mutex_unlock(&edev->qede_lock);
112}
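/* A minimal usage sketch (illustrative, not from this file): non-reentrant
 * configuration flows bracket their work with the qede lock, e.g.
 *
 *	__qede_lock(edev);
 *	... change driver state / reload queues ...
 *	__qede_unlock(edev);
 */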
113
Yuval Mintzfefb0202016-05-11 16:36:19 +0300114#ifdef CONFIG_QED_SRIOV
Moshe Shemesh79aab092016-09-22 12:11:15 +0300115static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
116 __be16 vlan_proto)
Yuval Mintz08feecd2016-05-11 16:36:20 +0300117{
118 struct qede_dev *edev = netdev_priv(ndev);
119
120 if (vlan > 4095) {
121 DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
122 return -EINVAL;
123 }
124
Moshe Shemesh79aab092016-09-22 12:11:15 +0300125 if (vlan_proto != htons(ETH_P_8021Q))
126 return -EPROTONOSUPPORT;
127
Yuval Mintz08feecd2016-05-11 16:36:20 +0300128 DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
129 vlan, vf);
130
131 return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
132}
133
Yuval Mintzeff16962016-05-11 16:36:21 +0300134static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
135{
136 struct qede_dev *edev = netdev_priv(ndev);
137
138 DP_VERBOSE(edev, QED_MSG_IOV,
139 "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
140 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
141
142 if (!is_valid_ether_addr(mac)) {
143 DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
144 return -EINVAL;
145 }
146
147 return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
148}
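/* Assuming these helpers are wired up as the .ndo_set_vf_vlan and
 * .ndo_set_vf_mac callbacks elsewhere in the driver, they are typically
 * reached through iproute2, e.g. (illustrative):
 *	ip link set <pf-ifname> vf 0 vlan 100
 *	ip link set <pf-ifname> vf 0 mac 02:11:22:33:44:55
 */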
149
Yuval Mintzfefb0202016-05-11 16:36:19 +0300150static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
151{
152 struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
Yuval Mintz831bfb0e2016-05-11 16:36:25 +0300153 struct qed_dev_info *qed_info = &edev->dev_info.common;
154 int rc;
Yuval Mintzfefb0202016-05-11 16:36:19 +0300155
156 DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
157
Yuval Mintz831bfb0e2016-05-11 16:36:25 +0300158 rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
159
160 /* Enable/Disable Tx switching for PF */
161 if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
162 qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
163 struct qed_update_vport_params params;
164
165 memset(&params, 0, sizeof(params));
166 params.vport_id = 0;
167 params.update_tx_switching_flg = 1;
168 params.tx_switching_flg = num_vfs_param ? 1 : 0;
169 edev->ops->vport_update(edev->cdev, &params);
170 }
171
172 return rc;
Yuval Mintzfefb0202016-05-11 16:36:19 +0300173}
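/* qede_sriov_configure() is called by the PCI core when VFs are toggled
 * through sysfs, e.g. (illustrative):
 *	echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 *	echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 * Writing 0 disables the VFs again; the Tx-switching vport update above
 * follows the same on/off decision.
 */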
174#endif
175
Yuval Mintze712d522015-10-26 11:02:27 +0200176static struct pci_driver qede_pci_driver = {
177 .name = "qede",
178 .id_table = qede_pci_tbl,
179 .probe = qede_probe,
180 .remove = qede_remove,
Mintz, Yuval14d39642016-10-31 07:14:23 +0200181 .shutdown = qede_shutdown,
Yuval Mintzfefb0202016-05-11 16:36:19 +0300182#ifdef CONFIG_QED_SRIOV
183 .sriov_configure = qede_sriov_configure,
184#endif
Yuval Mintze712d522015-10-26 11:02:27 +0200185};
186
Yuval Mintzc3aaa402016-10-14 05:19:17 -0400187static void qede_force_mac(void *dev, u8 *mac, bool forced)
Yuval Mintzeff16962016-05-11 16:36:21 +0300188{
189 struct qede_dev *edev = dev;
190
Yuval Mintzc3aaa402016-10-14 05:19:17 -0400191 /* MAC hints take effect only if we haven't set one already */
192 if (is_valid_ether_addr(edev->ndev->dev_addr) && !forced)
193 return;
194
Yuval Mintzeff16962016-05-11 16:36:21 +0300195 ether_addr_copy(edev->ndev->dev_addr, mac);
196 ether_addr_copy(edev->primary_mac, mac);
197}
198
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +0200199static struct qed_eth_cb_ops qede_ll_ops = {
200 {
201 .link_update = qede_link_update,
202 },
Yuval Mintzeff16962016-05-11 16:36:21 +0300203 .force_mac = qede_force_mac,
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +0200204};
205
Yuval Mintz29502192015-10-26 11:02:29 +0200206static int qede_netdev_event(struct notifier_block *this, unsigned long event,
207 void *ptr)
208{
209 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
210 struct ethtool_drvinfo drvinfo;
211 struct qede_dev *edev;
212
Ram Amranicee9fbd2016-10-01 21:59:56 +0300213 if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
Yuval Mintz29502192015-10-26 11:02:29 +0200214 goto done;
215
216 /* Check whether this is a qede device */
217 if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
218 goto done;
219
220 memset(&drvinfo, 0, sizeof(drvinfo));
221 ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
222 if (strcmp(drvinfo.driver, "qede"))
223 goto done;
224 edev = netdev_priv(ndev);
225
Ram Amranicee9fbd2016-10-01 21:59:56 +0300226 switch (event) {
227 case NETDEV_CHANGENAME:
228 /* Notify qed of the name change */
229 if (!edev->ops || !edev->ops->common)
230 goto done;
231 edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
232 break;
233 case NETDEV_CHANGEADDR:
234 edev = netdev_priv(ndev);
235 qede_roce_event_changeaddr(edev);
236 break;
237 }
Yuval Mintz29502192015-10-26 11:02:29 +0200238
239done:
240 return NOTIFY_DONE;
241}
242
243static struct notifier_block qede_netdev_notifier = {
244 .notifier_call = qede_netdev_event,
245};
246
Yuval Mintze712d522015-10-26 11:02:27 +0200247static
248int __init qede_init(void)
249{
250 int ret;
Yuval Mintze712d522015-10-26 11:02:27 +0200251
Yuval Mintz525ef5c2016-08-15 10:42:45 +0300252 pr_info("qede_init: %s\n", version);
Yuval Mintze712d522015-10-26 11:02:27 +0200253
Rahul Verma95114342016-04-10 12:42:59 +0300254 qed_ops = qed_get_eth_ops();
Yuval Mintze712d522015-10-26 11:02:27 +0200255 if (!qed_ops) {
256 pr_notice("Failed to get qed ethtool operations\n");
257 return -EINVAL;
258 }
259
Yuval Mintz29502192015-10-26 11:02:29 +0200260 /* Must register notifier before pci ops, since we might miss
261 * interface rename after pci probe and netdev registration.
262 */
263 ret = register_netdevice_notifier(&qede_netdev_notifier);
264 if (ret) {
265 pr_notice("Failed to register netdevice_notifier\n");
266 qed_put_eth_ops();
267 return -EINVAL;
268 }
269
Yuval Mintze712d522015-10-26 11:02:27 +0200270 ret = pci_register_driver(&qede_pci_driver);
271 if (ret) {
272 pr_notice("Failed to register driver\n");
Yuval Mintz29502192015-10-26 11:02:29 +0200273 unregister_netdevice_notifier(&qede_netdev_notifier);
Yuval Mintze712d522015-10-26 11:02:27 +0200274 qed_put_eth_ops();
275 return -EINVAL;
276 }
277
278 return 0;
279}
280
281static void __exit qede_cleanup(void)
282{
Yuval Mintz525ef5c2016-08-15 10:42:45 +0300283 if (debug & QED_LOG_INFO_MASK)
284 pr_info("qede_cleanup called\n");
Yuval Mintze712d522015-10-26 11:02:27 +0200285
Yuval Mintz29502192015-10-26 11:02:29 +0200286 unregister_netdevice_notifier(&qede_netdev_notifier);
Yuval Mintze712d522015-10-26 11:02:27 +0200287 pci_unregister_driver(&qede_pci_driver);
288 qed_put_eth_ops();
289}
290
291module_init(qede_init);
292module_exit(qede_cleanup);
293
294/* -------------------------------------------------------------------------
Yuval Mintz29502192015-10-26 11:02:29 +0200295 * START OF FAST-PATH
296 * -------------------------------------------------------------------------
297 */
298
299/* Unmap the data and free skb */
300static int qede_free_tx_pkt(struct qede_dev *edev,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300301 struct qede_tx_queue *txq, int *len)
Yuval Mintz29502192015-10-26 11:02:29 +0200302{
303 u16 idx = txq->sw_tx_cons & NUM_TX_BDS_MAX;
304 struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
305 struct eth_tx_1st_bd *first_bd;
306 struct eth_tx_bd *tx_data_bd;
307 int bds_consumed = 0;
308 int nbds;
309 bool data_split = txq->sw_tx_ring[idx].flags & QEDE_TSO_SPLIT_BD;
310 int i, split_bd_len = 0;
311
312 if (unlikely(!skb)) {
313 DP_ERR(edev,
314 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
315 idx, txq->sw_tx_cons, txq->sw_tx_prod);
316 return -1;
317 }
318
319 *len = skb->len;
320
321 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
322
323 bds_consumed++;
324
325 nbds = first_bd->data.nbds;
326
327 if (data_split) {
328 struct eth_tx_bd *split = (struct eth_tx_bd *)
329 qed_chain_consume(&txq->tx_pbl);
330 split_bd_len = BD_UNMAP_LEN(split);
331 bds_consumed++;
332 }
Manish Choprafabd5452016-10-21 04:43:45 -0400333 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
334 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
Yuval Mintz29502192015-10-26 11:02:29 +0200335
336 /* Unmap the data of the skb frags */
337 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
338 tx_data_bd = (struct eth_tx_bd *)
339 qed_chain_consume(&txq->tx_pbl);
340 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
341 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
342 }
343
344 while (bds_consumed++ < nbds)
345 qed_chain_consume(&txq->tx_pbl);
346
347 /* Free skb */
348 dev_kfree_skb_any(skb);
349 txq->sw_tx_ring[idx].skb = NULL;
350 txq->sw_tx_ring[idx].flags = 0;
351
352 return 0;
353}
354
355/* Unmap the data and free skb when mapping failed during start_xmit */
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200356static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
Yuval Mintz29502192015-10-26 11:02:29 +0200357 struct eth_tx_1st_bd *first_bd,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300358 int nbd, bool data_split)
Yuval Mintz29502192015-10-26 11:02:29 +0200359{
360 u16 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
361 struct sk_buff *skb = txq->sw_tx_ring[idx].skb;
362 struct eth_tx_bd *tx_data_bd;
363 int i, split_bd_len = 0;
364
365 /* Return prod to its position before this skb was handled */
366 qed_chain_set_prod(&txq->tx_pbl,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300367 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
Yuval Mintz29502192015-10-26 11:02:29 +0200368
369 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);
370
371 if (data_split) {
372 struct eth_tx_bd *split = (struct eth_tx_bd *)
373 qed_chain_produce(&txq->tx_pbl);
374 split_bd_len = BD_UNMAP_LEN(split);
375 nbd--;
376 }
377
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200378 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
Manish Choprafabd5452016-10-21 04:43:45 -0400379 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);
Yuval Mintz29502192015-10-26 11:02:29 +0200380
381 /* Unmap the data of the skb frags */
382 for (i = 0; i < nbd; i++) {
383 tx_data_bd = (struct eth_tx_bd *)
384 qed_chain_produce(&txq->tx_pbl);
385 if (tx_data_bd->nbytes)
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200386 dma_unmap_page(txq->dev,
Yuval Mintz29502192015-10-26 11:02:29 +0200387 BD_UNMAP_ADDR(tx_data_bd),
388 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
389 }
390
391 /* Return prod again to its position before this skb was handled */
392 qed_chain_set_prod(&txq->tx_pbl,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300393 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);
Yuval Mintz29502192015-10-26 11:02:29 +0200394
395 /* Free skb */
396 dev_kfree_skb_any(skb);
397 txq->sw_tx_ring[idx].skb = NULL;
398 txq->sw_tx_ring[idx].flags = 0;
399}
400
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200401static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
Yuval Mintz29502192015-10-26 11:02:29 +0200402{
403 u32 rc = XMIT_L4_CSUM;
404 __be16 l3_proto;
405
406 if (skb->ip_summed != CHECKSUM_PARTIAL)
407 return XMIT_PLAIN;
408
409 l3_proto = vlan_get_protocol(skb);
410 if (l3_proto == htons(ETH_P_IPV6) &&
411 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
412 *ipv6_ext = 1;
413
Manish Chopraa1502412016-10-14 05:19:18 -0400414 if (skb->encapsulation) {
Manish Chopra14db81d2016-04-14 01:38:33 -0400415 rc |= XMIT_ENC;
Manish Chopraa1502412016-10-14 05:19:18 -0400416 if (skb_is_gso(skb)) {
417 unsigned short gso_type = skb_shinfo(skb)->gso_type;
418
419 if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
420 (gso_type & SKB_GSO_GRE_CSUM))
421 rc |= XMIT_ENC_GSO_L4_CSUM;
422
423 rc |= XMIT_LSO;
424 return rc;
425 }
426 }
Manish Chopra14db81d2016-04-14 01:38:33 -0400427
Yuval Mintz29502192015-10-26 11:02:29 +0200428 if (skb_is_gso(skb))
429 rc |= XMIT_LSO;
430
431 return rc;
432}
433
434static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
435 struct eth_tx_2nd_bd *second_bd,
436 struct eth_tx_3rd_bd *third_bd)
437{
438 u8 l4_proto;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500439 u16 bd2_bits1 = 0, bd2_bits2 = 0;
Yuval Mintz29502192015-10-26 11:02:29 +0200440
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500441 bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);
Yuval Mintz29502192015-10-26 11:02:29 +0200442
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500443 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
Yuval Mintz29502192015-10-26 11:02:29 +0200444 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
445 << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
446
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500447 bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
Yuval Mintz29502192015-10-26 11:02:29 +0200448 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);
449
450 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
451 l4_proto = ipv6_hdr(skb)->nexthdr;
452 else
453 l4_proto = ip_hdr(skb)->protocol;
454
455 if (l4_proto == IPPROTO_UDP)
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500456 bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
Yuval Mintz29502192015-10-26 11:02:29 +0200457
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500458 if (third_bd)
Yuval Mintz29502192015-10-26 11:02:29 +0200459 third_bd->data.bitfields |=
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500460 cpu_to_le16(((tcp_hdrlen(skb) / 4) &
461 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
462 ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);
Yuval Mintz29502192015-10-26 11:02:29 +0200463
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500464 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
Yuval Mintz29502192015-10-26 11:02:29 +0200465 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
466}
467
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200468static int map_frag_to_bd(struct qede_tx_queue *txq,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300469 skb_frag_t *frag, struct eth_tx_bd *bd)
Yuval Mintz29502192015-10-26 11:02:29 +0200470{
471 dma_addr_t mapping;
472
473 /* Map skb non-linear frag data for DMA */
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200474 mapping = skb_frag_dma_map(txq->dev, frag, 0,
Yuval Mintz1a635e42016-08-15 10:42:43 +0300475 skb_frag_size(frag), DMA_TO_DEVICE);
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200476 if (unlikely(dma_mapping_error(txq->dev, mapping)))
Yuval Mintz29502192015-10-26 11:02:29 +0200477 return -ENOMEM;
Yuval Mintz29502192015-10-26 11:02:29 +0200478
479 /* Set up the data pointer of the frag data */
480 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));
481
482 return 0;
483}
484
Manish Chopra14db81d2016-04-14 01:38:33 -0400485static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
486{
487 if (is_encap_pkt)
488 return (skb_inner_transport_header(skb) +
489 inner_tcp_hdrlen(skb) - skb->data);
490 else
491 return (skb_transport_header(skb) +
492 tcp_hdrlen(skb) - skb->data);
493}
494
Yuval Mintzb1199b12016-02-24 16:52:46 +0200495/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
496#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200497static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
Yuval Mintzb1199b12016-02-24 16:52:46 +0200498{
499 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;
500
501 if (xmit_type & XMIT_LSO) {
502 int hlen;
503
Manish Chopra14db81d2016-04-14 01:38:33 -0400504 hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
Yuval Mintzb1199b12016-02-24 16:52:46 +0200505
506 /* linear payload would require its own BD */
507 if (skb_headlen(skb) > hlen)
508 allowed_frags--;
509 }
510
511 return (skb_shinfo(skb)->nr_frags > allowed_frags);
512}
513#endif
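/* Rough illustration of why linearization can be needed (the numbers are
 * assumptions, not taken from this file): with 4K pages MAX_SKB_FRAGS is
 * typically 17, so a non-LSO skb may need up to 17 frag BDs plus the two
 * header/headlen BDs - more than the HW per-packet BD limit - in which
 * case qede_start_xmit() falls back to skb_linearize().
 */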
514
Manish Chopra312e0672016-06-30 02:35:20 -0400515static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
516{
517 /* wmb makes sure that the BD data is written before updating the
518 * producer, otherwise FW may read old data from the BDs.
519 */
520 wmb();
521 barrier();
522 writel(txq->tx_db.raw, txq->doorbell_addr);
523
524 /* mmiowb is needed to synchronize doorbell writes from more than one
525 * processor. It guarantees that the write reaches the device before
526 * the queue lock is released and another start_xmit is called (possibly
527 * on another CPU). Without this barrier, the next doorbell can bypass
528 * this doorbell. This is applicable to IA64/Altix systems.
529 */
530 mmiowb();
531}
532
Yuval Mintz29502192015-10-26 11:02:29 +0200533/* Main transmit function */
Yuval Mintz1a635e42016-08-15 10:42:43 +0300534static netdev_tx_t qede_start_xmit(struct sk_buff *skb,
535 struct net_device *ndev)
Yuval Mintz29502192015-10-26 11:02:29 +0200536{
537 struct qede_dev *edev = netdev_priv(ndev);
538 struct netdev_queue *netdev_txq;
539 struct qede_tx_queue *txq;
540 struct eth_tx_1st_bd *first_bd;
541 struct eth_tx_2nd_bd *second_bd = NULL;
542 struct eth_tx_3rd_bd *third_bd = NULL;
543 struct eth_tx_bd *tx_data_bd = NULL;
544 u16 txq_index;
545 u8 nbd = 0;
546 dma_addr_t mapping;
547 int rc, frag_idx = 0, ipv6_ext = 0;
548 u8 xmit_type;
549 u16 idx;
550 u16 hlen;
Dan Carpenter810810f2016-05-05 16:21:30 +0300551 bool data_split = false;
Yuval Mintz29502192015-10-26 11:02:29 +0200552
553 /* Get tx-queue context and netdev index */
554 txq_index = skb_get_queue_mapping(skb);
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -0400555 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev));
Mintz, Yuval80439a12016-11-29 16:47:02 +0200556 txq = edev->fp_array[edev->fp_num_rx + txq_index].txq;
Yuval Mintz29502192015-10-26 11:02:29 +0200557 netdev_txq = netdev_get_tx_queue(ndev, txq_index);
558
Yuval Mintz1a635e42016-08-15 10:42:43 +0300559 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
Yuval Mintz29502192015-10-26 11:02:29 +0200560
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200561 xmit_type = qede_xmit_type(skb, &ipv6_ext);
Yuval Mintz29502192015-10-26 11:02:29 +0200562
Yuval Mintzb1199b12016-02-24 16:52:46 +0200563#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200564 if (qede_pkt_req_lin(skb, xmit_type)) {
Yuval Mintzb1199b12016-02-24 16:52:46 +0200565 if (skb_linearize(skb)) {
566 DP_NOTICE(edev,
567 "SKB linearization failed - silently dropping this SKB\n");
568 dev_kfree_skb_any(skb);
569 return NETDEV_TX_OK;
570 }
571 }
572#endif
573
Yuval Mintz29502192015-10-26 11:02:29 +0200574 /* Fill the entry in the SW ring and the BDs in the FW ring */
575 idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
576 txq->sw_tx_ring[idx].skb = skb;
577 first_bd = (struct eth_tx_1st_bd *)
578 qed_chain_produce(&txq->tx_pbl);
579 memset(first_bd, 0, sizeof(*first_bd));
580 first_bd->data.bd_flags.bitfields =
581 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
582
583 /* Map skb linear data for DMA and set in the first BD */
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200584 mapping = dma_map_single(txq->dev, skb->data,
Yuval Mintz29502192015-10-26 11:02:29 +0200585 skb_headlen(skb), DMA_TO_DEVICE);
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200586 if (unlikely(dma_mapping_error(txq->dev, mapping))) {
Yuval Mintz29502192015-10-26 11:02:29 +0200587 DP_NOTICE(edev, "SKB mapping failed\n");
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200588 qede_free_failed_tx_pkt(txq, first_bd, 0, false);
Manish Chopra312e0672016-06-30 02:35:20 -0400589 qede_update_tx_producer(txq);
Yuval Mintz29502192015-10-26 11:02:29 +0200590 return NETDEV_TX_OK;
591 }
592 nbd++;
593 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
594
595 /* In case there is IPv6 with extension headers or LSO we need 2nd and
596 * 3rd BDs.
597 */
598 if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
599 second_bd = (struct eth_tx_2nd_bd *)
600 qed_chain_produce(&txq->tx_pbl);
601 memset(second_bd, 0, sizeof(*second_bd));
602
603 nbd++;
604 third_bd = (struct eth_tx_3rd_bd *)
605 qed_chain_produce(&txq->tx_pbl);
606 memset(third_bd, 0, sizeof(*third_bd));
607
608 nbd++;
609 /* We need to fill in additional data in second_bd... */
610 tx_data_bd = (struct eth_tx_bd *)second_bd;
611 }
612
613 if (skb_vlan_tag_present(skb)) {
614 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
615 first_bd->data.bd_flags.bitfields |=
616 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
617 }
618
619 /* Fill the parsing flags & params according to the requested offload */
620 if (xmit_type & XMIT_L4_CSUM) {
621 /* We don't re-calculate IP checksum as it is already done by
622 * the upper stack
623 */
624 first_bd->data.bd_flags.bitfields |=
625 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
626
Manish Chopra14db81d2016-04-14 01:38:33 -0400627 if (xmit_type & XMIT_ENC) {
628 first_bd->data.bd_flags.bitfields |=
629 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
Yuval Mintz351a4ded2016-06-02 10:23:29 +0300630 first_bd->data.bitfields |=
631 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
Manish Chopra14db81d2016-04-14 01:38:33 -0400632 }
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500633
Yuval Mintzd8c2c7e2016-08-22 13:25:11 +0300634 /* Legacy FW had flipped behavior with regard to this bit -
635 * i.e., it needed to be set to prevent FW from touching encapsulated
636 * packets when it didn't need to.
637 */
638 if (unlikely(txq->is_legacy))
639 first_bd->data.bitfields ^=
640 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
641
Yuval Mintz29502192015-10-26 11:02:29 +0200642 /* If the packet is IPv6 with an extension header, indicate that
643 * to FW and pass a few params, since the device cracker doesn't
644 * support parsing IPv6 with extension headers.
645 */
646 if (unlikely(ipv6_ext))
647 qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
648 }
649
650 if (xmit_type & XMIT_LSO) {
651 first_bd->data.bd_flags.bitfields |=
652 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
653 third_bd->data.lso_mss =
654 cpu_to_le16(skb_shinfo(skb)->gso_size);
655
Manish Chopra14db81d2016-04-14 01:38:33 -0400656 if (unlikely(xmit_type & XMIT_ENC)) {
657 first_bd->data.bd_flags.bitfields |=
658 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
Manish Chopraa1502412016-10-14 05:19:18 -0400659
660 if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
661 u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
662
663 first_bd->data.bd_flags.bitfields |= 1 << tmp;
664 }
Manish Chopra14db81d2016-04-14 01:38:33 -0400665 hlen = qede_get_skb_hlen(skb, true);
666 } else {
667 first_bd->data.bd_flags.bitfields |=
668 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
669 hlen = qede_get_skb_hlen(skb, false);
670 }
Yuval Mintz29502192015-10-26 11:02:29 +0200671
672 /* @@@TBD - if this won't be removed, need to check it */
673 third_bd->data.bitfields |=
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500674 cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT));
Yuval Mintz29502192015-10-26 11:02:29 +0200675
676 /* Make life easier for FW guys who can't deal with header and
677 * data on same BD. If we need to split, use the second bd...
678 */
679 if (unlikely(skb_headlen(skb) > hlen)) {
680 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
681 "TSO split header size is %d (%x:%x)\n",
682 first_bd->nbytes, first_bd->addr.hi,
683 first_bd->addr.lo);
684
685 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
686 le32_to_cpu(first_bd->addr.lo)) +
687 hlen;
688
689 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
690 le16_to_cpu(first_bd->nbytes) -
691 hlen);
692
693 /* this marks the BD as one that has no
694 * individual mapping
695 */
696 txq->sw_tx_ring[idx].flags |= QEDE_TSO_SPLIT_BD;
697
698 first_bd->nbytes = cpu_to_le16(hlen);
699
700 tx_data_bd = (struct eth_tx_bd *)third_bd;
701 data_split = true;
702 }
Yuval Mintz351a4ded2016-06-02 10:23:29 +0300703 } else {
704 first_bd->data.bitfields |=
705 (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
706 ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
Yuval Mintz29502192015-10-26 11:02:29 +0200707 }
708
709 /* Handle fragmented skb */
710 /* special handling for frags inside 2nd and 3rd BDs */
711 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200712 rc = map_frag_to_bd(txq,
Yuval Mintz29502192015-10-26 11:02:29 +0200713 &skb_shinfo(skb)->frags[frag_idx],
714 tx_data_bd);
715 if (rc) {
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200716 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
Manish Chopra312e0672016-06-30 02:35:20 -0400717 qede_update_tx_producer(txq);
Yuval Mintz29502192015-10-26 11:02:29 +0200718 return NETDEV_TX_OK;
719 }
720
721 if (tx_data_bd == (struct eth_tx_bd *)second_bd)
722 tx_data_bd = (struct eth_tx_bd *)third_bd;
723 else
724 tx_data_bd = NULL;
725
726 frag_idx++;
727 }
728
729 /* map the remaining frags into the 4th, 5th, ... BDs */
730 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
731 tx_data_bd = (struct eth_tx_bd *)
732 qed_chain_produce(&txq->tx_pbl);
733
734 memset(tx_data_bd, 0, sizeof(*tx_data_bd));
735
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200736 rc = map_frag_to_bd(txq,
Yuval Mintz29502192015-10-26 11:02:29 +0200737 &skb_shinfo(skb)->frags[frag_idx],
738 tx_data_bd);
739 if (rc) {
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200740 qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
Manish Chopra312e0672016-06-30 02:35:20 -0400741 qede_update_tx_producer(txq);
Yuval Mintz29502192015-10-26 11:02:29 +0200742 return NETDEV_TX_OK;
743 }
744 }
745
746 /* update the first BD with the actual num BDs */
747 first_bd->data.nbds = nbd;
748
749 netdev_tx_sent_queue(netdev_txq, skb->len);
750
751 skb_tx_timestamp(skb);
752
753 /* Advance the packet producer only now, right before sending, since
754 * mapping of pages may fail.
755 */
756 txq->sw_tx_prod++;
757
758 /* 'next page' entries are counted in the producer value */
759 txq->tx_db.data.bd_prod =
760 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
761
Yuval Mintz039a3922016-08-16 18:40:18 +0300762 if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
Manish Chopra312e0672016-06-30 02:35:20 -0400763 qede_update_tx_producer(txq);
Yuval Mintz29502192015-10-26 11:02:29 +0200764
765 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
766 < (MAX_SKB_FRAGS + 1))) {
Yuval Mintz039a3922016-08-16 18:40:18 +0300767 if (skb->xmit_more)
768 qede_update_tx_producer(txq);
769
Yuval Mintz29502192015-10-26 11:02:29 +0200770 netif_tx_stop_queue(netdev_txq);
Sudarsana Reddy Kalluru68db9ec2016-08-16 10:51:02 -0400771 txq->stopped_cnt++;
Yuval Mintz29502192015-10-26 11:02:29 +0200772 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
773 "Stop queue was called\n");
774 /* paired memory barrier is in qede_tx_int(), we have to keep
775 * ordering of set_bit() in netif_tx_stop_queue() and read of
776 * fp->bd_tx_cons
777 */
778 smp_mb();
779
780 if (qed_chain_get_elem_left(&txq->tx_pbl)
781 >= (MAX_SKB_FRAGS + 1) &&
782 (edev->state == QEDE_STATE_OPEN)) {
783 netif_tx_wake_queue(netdev_txq);
784 DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
785 "Wake queue was called\n");
786 }
787 }
788
789 return NETDEV_TX_OK;
790}
791
Sudarsana Reddy Kalluru16f46bf2016-04-28 20:20:54 -0400792int qede_txq_has_work(struct qede_tx_queue *txq)
Yuval Mintz29502192015-10-26 11:02:29 +0200793{
794 u16 hw_bd_cons;
795
796 /* Tell compiler that consumer and producer can change */
797 barrier();
798 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
799 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
800 return 0;
801
802 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
803}
804
Yuval Mintz1a635e42016-08-15 10:42:43 +0300805static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
Yuval Mintz29502192015-10-26 11:02:29 +0200806{
807 struct netdev_queue *netdev_txq;
808 u16 hw_bd_cons;
809 unsigned int pkts_compl = 0, bytes_compl = 0;
810 int rc;
811
812 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
813
814 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
815 barrier();
816
817 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
818 int len = 0;
819
820 rc = qede_free_tx_pkt(edev, txq, &len);
821 if (rc) {
822 DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
823 hw_bd_cons,
824 qed_chain_get_cons_idx(&txq->tx_pbl));
825 break;
826 }
827
828 bytes_compl += len;
829 pkts_compl++;
830 txq->sw_tx_cons++;
Sudarsana Reddy Kalluru68db9ec2016-08-16 10:51:02 -0400831 txq->xmit_pkts++;
Yuval Mintz29502192015-10-26 11:02:29 +0200832 }
833
834 netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
835
836 /* Need to make the tx_bd_cons update visible to start_xmit()
837 * before checking for netif_tx_queue_stopped(). Without the
838 * memory barrier, there is a small possibility that
839 * start_xmit() will miss it and cause the queue to be stopped
840 * forever.
841 * On the other hand we need an rmb() here to ensure the proper
842 * ordering of bit testing in the following
843 * netif_tx_queue_stopped(txq) call.
844 */
845 smp_mb();
846
847 if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
848 /* Taking tx_lock is needed to prevent re-enabling the queue
849 * while it's empty. This could have happened if rx_action() gets
850 * suspended in qede_tx_int() after the condition before
851 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
852 *
853 * stops the queue->sees fresh tx_bd_cons->releases the queue->
854 * sends some packets consuming the whole queue again->
855 * stops the queue
856 */
857
858 __netif_tx_lock(netdev_txq, smp_processor_id());
859
860 if ((netif_tx_queue_stopped(netdev_txq)) &&
861 (edev->state == QEDE_STATE_OPEN) &&
862 (qed_chain_get_elem_left(&txq->tx_pbl)
863 >= (MAX_SKB_FRAGS + 1))) {
864 netif_tx_wake_queue(netdev_txq);
865 DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
866 "Wake queue was called\n");
867 }
868
869 __netif_tx_unlock(netdev_txq);
870 }
871
872 return 0;
873}
874
Sudarsana Reddy Kalluru16f46bf2016-04-28 20:20:54 -0400875bool qede_has_rx_work(struct qede_rx_queue *rxq)
Yuval Mintz29502192015-10-26 11:02:29 +0200876{
877 u16 hw_comp_cons, sw_comp_cons;
878
879 /* Tell compiler that status block fields can change */
880 barrier();
881
882 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
883 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
884
885 return hw_comp_cons != sw_comp_cons;
886}
887
Manish Chopraf86af2d2016-04-20 03:03:27 -0400888static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
889{
890 qed_chain_consume(&rxq->rx_bd_ring);
891 rxq->sw_rx_cons++;
892}
893
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500894/* This function reuses the buffer (from an offset) from
895 * consumer index to producer index in the bd ring
Yuval Mintz29502192015-10-26 11:02:29 +0200896 */
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200897static inline void qede_reuse_page(struct qede_rx_queue *rxq,
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500898 struct sw_rx_data *curr_cons)
Yuval Mintz29502192015-10-26 11:02:29 +0200899{
Yuval Mintz29502192015-10-26 11:02:29 +0200900 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500901 struct sw_rx_data *curr_prod;
902 dma_addr_t new_mapping;
Yuval Mintz29502192015-10-26 11:02:29 +0200903
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500904 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
905 *curr_prod = *curr_cons;
Yuval Mintz29502192015-10-26 11:02:29 +0200906
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500907 new_mapping = curr_prod->mapping + curr_prod->page_offset;
Yuval Mintz29502192015-10-26 11:02:29 +0200908
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500909 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
910 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping));
911
Yuval Mintz29502192015-10-26 11:02:29 +0200912 rxq->sw_rx_prod++;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500913 curr_cons->data = NULL;
914}
915
Manish Chopraf86af2d2016-04-20 03:03:27 -0400916/* In case of allocation failures, reuse buffers from the
917 * consumer index to produce buffers for the firmware
918 */
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200919void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
Manish Chopraf86af2d2016-04-20 03:03:27 -0400920{
921 struct sw_rx_data *curr_cons;
922
923 for (; count > 0; count--) {
924 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200925 qede_reuse_page(rxq, curr_cons);
Manish Chopraf86af2d2016-04-20 03:03:27 -0400926 qede_rx_bd_ring_consume(rxq);
927 }
928}
929
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200930static int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
931{
932 struct sw_rx_data *sw_rx_data;
933 struct eth_rx_bd *rx_bd;
934 dma_addr_t mapping;
935 struct page *data;
936
937 data = alloc_pages(GFP_ATOMIC, 0);
938 if (unlikely(!data))
939 return -ENOMEM;
940
941 /* Map the entire page, as it will be used for
942 * multiple RX buffer segments of rx_buf_seg_size each.
943 */
944 mapping = dma_map_page(rxq->dev, data, 0,
945 PAGE_SIZE, DMA_FROM_DEVICE);
946 if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
947 __free_page(data);
948 return -ENOMEM;
949 }
950
951 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
952 sw_rx_data->page_offset = 0;
953 sw_rx_data->data = data;
954 sw_rx_data->mapping = mapping;
955
956 /* Advance PROD and get BD pointer */
957 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
958 WARN_ON(!rx_bd);
959 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
960 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping));
961
962 rxq->sw_rx_prod++;
963
964 return 0;
965}
966
967static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500968 struct sw_rx_data *curr_cons)
969{
970 /* Move to the next segment in the page */
971 curr_cons->page_offset += rxq->rx_buf_seg_size;
972
973 if (curr_cons->page_offset == PAGE_SIZE) {
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200974 if (unlikely(qede_alloc_rx_buffer(rxq))) {
Manish Chopraf86af2d2016-04-20 03:03:27 -0400975 /* Since we failed to allocate a new buffer, the
976 * current buffer can be used again.
977 */
978 curr_cons->page_offset -= rxq->rx_buf_seg_size;
979
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500980 return -ENOMEM;
Manish Chopraf86af2d2016-04-20 03:03:27 -0400981 }
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500982
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200983 dma_unmap_page(rxq->dev, curr_cons->mapping,
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500984 PAGE_SIZE, DMA_FROM_DEVICE);
985 } else {
986 /* Increment the page refcount as we don't want the
987 * network stack to take ownership of a page which can
988 * be recycled multiple times by the driver.
989 */
Joonsoo Kim6d061f92016-05-19 17:10:46 -0700990 page_ref_inc(curr_cons->data);
Mintz, Yuval9eb22352016-11-29 16:47:08 +0200991 qede_reuse_page(rxq, curr_cons);
Yuval Mintzfc48b7a2016-02-15 13:22:35 -0500992 }
993
994 return 0;
Yuval Mintz29502192015-10-26 11:02:29 +0200995}
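/* Rx buffers are carved out of whole pages in rx_buf_seg_size chunks; e.g.
 * (illustrative) with a 4K page and a 2K segment size each page backs two
 * BDs before it is finally unmapped. Intermediate segments only bump the
 * page refcount so both the stack and the driver can hold the page.
 */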
996
Sudarsana Reddy Kalluru837d4eb2016-10-21 04:43:41 -0400997void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
Yuval Mintz29502192015-10-26 11:02:29 +0200998{
999 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
1000 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
1001 struct eth_rx_prod_data rx_prods = {0};
1002
1003 /* Update producers */
1004 rx_prods.bd_prod = cpu_to_le16(bd_prod);
1005 rx_prods.cqe_prod = cpu_to_le16(cqe_prod);
1006
1007 /* Make sure that the BD and SGE data is updated before updating the
1008 * producers since FW might read the BD/SGE right after the producer
1009 * is updated.
1010 */
1011 wmb();
1012
1013 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
1014 (u32 *)&rx_prods);
1015
1016 /* mmiowb is needed to synchronize doorbell writes from more than one
1017 * processor. It guarantees that the write reaches the device before
1018 * the napi lock is released and another qede_poll is called (possibly
1019 * on another CPU). Without this barrier, the next doorbell can bypass
1020 * this doorbell. This is applicable to IA64/Altix systems.
1021 */
1022 mmiowb();
1023}
1024
Mintz, Yuval8a472532016-11-29 16:47:07 +02001025static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
Yuval Mintz29502192015-10-26 11:02:29 +02001026{
Mintz, Yuval8a472532016-11-29 16:47:07 +02001027 enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
Yuval Mintz29502192015-10-26 11:02:29 +02001028 enum rss_hash_type htype;
Mintz, Yuval8a472532016-11-29 16:47:07 +02001029 u32 hash = 0;
Yuval Mintz29502192015-10-26 11:02:29 +02001030
1031 htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
Mintz, Yuval8a472532016-11-29 16:47:07 +02001032 if (htype) {
1033 hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
1034 (htype == RSS_HASH_TYPE_IPV6)) ?
1035 PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
1036 hash = le32_to_cpu(rss_hash);
Yuval Mintz29502192015-10-26 11:02:29 +02001037 }
Mintz, Yuval8a472532016-11-29 16:47:07 +02001038 skb_set_hash(skb, hash, hash_type);
Yuval Mintz29502192015-10-26 11:02:29 +02001039}
1040
1041static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
1042{
1043 skb_checksum_none_assert(skb);
1044
1045 if (csum_flag & QEDE_CSUM_UNNECESSARY)
1046 skb->ip_summed = CHECKSUM_UNNECESSARY;
Manish Chopra14db81d2016-04-14 01:38:33 -04001047
1048 if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
1049 skb->csum_level = 1;
Yuval Mintz29502192015-10-26 11:02:29 +02001050}
1051
1052static inline void qede_skb_receive(struct qede_dev *edev,
1053 struct qede_fastpath *fp,
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001054 struct qede_rx_queue *rxq,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001055 struct sk_buff *skb, u16 vlan_tag)
Yuval Mintz29502192015-10-26 11:02:29 +02001056{
1057 if (vlan_tag)
Yuval Mintz1a635e42016-08-15 10:42:43 +03001058 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
Yuval Mintz29502192015-10-26 11:02:29 +02001059
1060 napi_gro_receive(&fp->napi, skb);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001061 fp->rxq->rcv_pkts++;
Yuval Mintz29502192015-10-26 11:02:29 +02001062}
1063
Manish Chopra55482ed2016-03-04 12:35:06 -05001064static void qede_set_gro_params(struct qede_dev *edev,
1065 struct sk_buff *skb,
1066 struct eth_fast_path_rx_tpa_start_cqe *cqe)
1067{
1068 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
1069
1070 if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
1071 PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
1072 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1073 else
1074 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1075
1076 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
1077 cqe->header_len;
1078}
1079
1080static int qede_fill_frag_skb(struct qede_dev *edev,
1081 struct qede_rx_queue *rxq,
Yuval Mintz1a635e42016-08-15 10:42:43 +03001082 u8 tpa_agg_index, u16 len_on_bd)
Manish Chopra55482ed2016-03-04 12:35:06 -05001083{
1084 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
1085 NUM_RX_BDS_MAX];
1086 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
1087 struct sk_buff *skb = tpa_info->skb;
1088
Mintz, Yuval01e23012016-11-29 16:47:00 +02001089 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
Manish Chopra55482ed2016-03-04 12:35:06 -05001090 goto out;
1091
1092 /* Add one frag and update the appropriate fields in the skb */
1093 skb_fill_page_desc(skb, tpa_info->frag_id++,
1094 current_bd->data, current_bd->page_offset,
1095 len_on_bd);
1096
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001097 if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
Manish Chopraf86af2d2016-04-20 03:03:27 -04001098 /* Incr page ref count to reuse on allocation failure
1099 * so that it doesn't get freed while freeing SKB.
1100 */
Joonsoo Kim0139aa72016-05-19 17:10:49 -07001101 page_ref_inc(current_bd->data);
Manish Chopra55482ed2016-03-04 12:35:06 -05001102 goto out;
1103 }
1104
1105 qed_chain_consume(&rxq->rx_bd_ring);
1106 rxq->sw_rx_cons++;
1107
1108 skb->data_len += len_on_bd;
1109 skb->truesize += rxq->rx_buf_seg_size;
1110 skb->len += len_on_bd;
1111
1112 return 0;
1113
1114out:
Mintz, Yuval01e23012016-11-29 16:47:00 +02001115 tpa_info->state = QEDE_AGG_STATE_ERROR;
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001116 qede_recycle_rx_bd_ring(rxq, 1);
1117
Manish Chopra55482ed2016-03-04 12:35:06 -05001118 return -ENOMEM;
1119}
1120
1121static void qede_tpa_start(struct qede_dev *edev,
1122 struct qede_rx_queue *rxq,
1123 struct eth_fast_path_rx_tpa_start_cqe *cqe)
1124{
1125 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
1126 struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring);
1127 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
Mintz, Yuval01e23012016-11-29 16:47:00 +02001128 struct sw_rx_data *replace_buf = &tpa_info->buffer;
1129 dma_addr_t mapping = tpa_info->buffer_mapping;
Manish Chopra55482ed2016-03-04 12:35:06 -05001130 struct sw_rx_data *sw_rx_data_cons;
1131 struct sw_rx_data *sw_rx_data_prod;
Manish Chopra55482ed2016-03-04 12:35:06 -05001132
1133 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
1134 sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
1135
1136 /* Use the pre-allocated replacement buffer - we can't release the agg.
1137 * start until it's over and we don't want to risk allocation failing
1138 * here, so re-allocate when the aggregation is over.
1139 */
Manish Chopra09ec8e72016-05-18 07:43:57 -04001140 sw_rx_data_prod->mapping = replace_buf->mapping;
Manish Chopra55482ed2016-03-04 12:35:06 -05001141
1142 sw_rx_data_prod->data = replace_buf->data;
1143 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
1144 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping));
1145 sw_rx_data_prod->page_offset = replace_buf->page_offset;
1146
1147 rxq->sw_rx_prod++;
1148
1149 /* move partial skb from cons to pool (don't unmap yet)
1150 * save the mapping, in case we drop the packet later on.
1151 */
Mintz, Yuval01e23012016-11-29 16:47:00 +02001152 tpa_info->buffer = *sw_rx_data_cons;
Manish Chopra55482ed2016-03-04 12:35:06 -05001153 mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi),
1154 le32_to_cpu(rx_bd_cons->addr.lo));
1155
Mintz, Yuval01e23012016-11-29 16:47:00 +02001156 tpa_info->buffer_mapping = mapping;
Manish Chopra55482ed2016-03-04 12:35:06 -05001157 rxq->sw_rx_cons++;
1158
1159 /* set tpa state to start only if we are able to allocate skb
1160 * for this aggregation, otherwise mark as error and aggregation will
1161 * be dropped
1162 */
1163 tpa_info->skb = netdev_alloc_skb(edev->ndev,
1164 le16_to_cpu(cqe->len_on_first_bd));
1165 if (unlikely(!tpa_info->skb)) {
Manish Chopraf86af2d2016-04-20 03:03:27 -04001166 DP_NOTICE(edev, "Failed to allocate SKB for gro\n");
Mintz, Yuval01e23012016-11-29 16:47:00 +02001167 tpa_info->state = QEDE_AGG_STATE_ERROR;
Manish Chopraf86af2d2016-04-20 03:03:27 -04001168 goto cons_buf;
Manish Chopra55482ed2016-03-04 12:35:06 -05001169 }
1170
Manish Chopra55482ed2016-03-04 12:35:06 -05001171 /* Start filling in the aggregation info */
Mintz, Yuval01e23012016-11-29 16:47:00 +02001172 skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd));
Manish Chopra55482ed2016-03-04 12:35:06 -05001173 tpa_info->frag_id = 0;
Mintz, Yuval01e23012016-11-29 16:47:00 +02001174 tpa_info->state = QEDE_AGG_STATE_START;
Manish Chopra55482ed2016-03-04 12:35:06 -05001175
Mintz, Yuval01e23012016-11-29 16:47:00 +02001176 /* Store some information from first CQE */
1177 tpa_info->start_cqe_placement_offset = cqe->placement_offset;
1178 tpa_info->start_cqe_bd_len = le16_to_cpu(cqe->len_on_first_bd);
Manish Chopra55482ed2016-03-04 12:35:06 -05001179 if ((le16_to_cpu(cqe->pars_flags.flags) >>
1180 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
Mintz, Yuval01e23012016-11-29 16:47:00 +02001181 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
Manish Chopra55482ed2016-03-04 12:35:06 -05001182 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
1183 else
1184 tpa_info->vlan_tag = 0;
1185
Mintz, Yuval8a472532016-11-29 16:47:07 +02001186 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
1187
Manish Chopra55482ed2016-03-04 12:35:06 -05001188 /* This is needed in order to enable forwarding support */
1189 qede_set_gro_params(edev, tpa_info->skb, cqe);
1190
Manish Chopraf86af2d2016-04-20 03:03:27 -04001191cons_buf: /* We still need to handle bd_len_list to consume buffers */
Manish Chopra55482ed2016-03-04 12:35:06 -05001192 if (likely(cqe->ext_bd_len_list[0]))
1193 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
1194 le16_to_cpu(cqe->ext_bd_len_list[0]));
1195
1196 if (unlikely(cqe->ext_bd_len_list[1])) {
1197 DP_ERR(edev,
1198 "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
Mintz, Yuval01e23012016-11-29 16:47:00 +02001199 tpa_info->state = QEDE_AGG_STATE_ERROR;
Manish Chopra55482ed2016-03-04 12:35:06 -05001200 }
1201}
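/* TPA (HW-assisted GRO) life cycle as implemented here: a start CQE moves
 * the context to QEDE_AGG_STATE_START and seeds the skb, cont CQEs append
 * frags via qede_fill_frag_skb(), and the end CQE finalizes the skb and
 * hands it to qede_gro_receive(); any failure flips the context to
 * QEDE_AGG_STATE_ERROR so remaining buffers are recycled and the skb is
 * dropped in qede_tpa_end().
 */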
1202
Manish Chopra88f09bd2016-03-08 04:09:44 -05001203#ifdef CONFIG_INET
Manish Chopra55482ed2016-03-04 12:35:06 -05001204static void qede_gro_ip_csum(struct sk_buff *skb)
1205{
1206 const struct iphdr *iph = ip_hdr(skb);
1207 struct tcphdr *th;
1208
Manish Chopra55482ed2016-03-04 12:35:06 -05001209 skb_set_transport_header(skb, sizeof(struct iphdr));
1210 th = tcp_hdr(skb);
1211
1212 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
1213 iph->saddr, iph->daddr, 0);
1214
1215 tcp_gro_complete(skb);
1216}
1217
1218static void qede_gro_ipv6_csum(struct sk_buff *skb)
1219{
1220 struct ipv6hdr *iph = ipv6_hdr(skb);
1221 struct tcphdr *th;
1222
Manish Chopra55482ed2016-03-04 12:35:06 -05001223 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
1224 th = tcp_hdr(skb);
1225
1226 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
1227 &iph->saddr, &iph->daddr, 0);
1228 tcp_gro_complete(skb);
1229}
Manish Chopra88f09bd2016-03-08 04:09:44 -05001230#endif
Manish Chopra55482ed2016-03-04 12:35:06 -05001231
1232static void qede_gro_receive(struct qede_dev *edev,
1233 struct qede_fastpath *fp,
1234 struct sk_buff *skb,
1235 u16 vlan_tag)
1236{
Manish Chopraee2fa8e2016-04-20 03:03:29 -04001237 /* FW can send a single MTU-sized packet from the GRO flow
1238 * due to an aggregation timeout/last segment etc., which
1239 * is not expected to be a GRO packet. If an skb has zero
1240 * frags then simply push it up the stack as a non-GSO skb.
1241 */
1242 if (unlikely(!skb->data_len)) {
1243 skb_shinfo(skb)->gso_type = 0;
1244 skb_shinfo(skb)->gso_size = 0;
1245 goto send_skb;
1246 }
1247
Manish Chopra88f09bd2016-03-08 04:09:44 -05001248#ifdef CONFIG_INET
Manish Chopra55482ed2016-03-04 12:35:06 -05001249 if (skb_shinfo(skb)->gso_size) {
Manish Chopraaad94c02016-04-20 03:03:28 -04001250 skb_set_network_header(skb, 0);
1251
Manish Chopra55482ed2016-03-04 12:35:06 -05001252 switch (skb->protocol) {
1253 case htons(ETH_P_IP):
1254 qede_gro_ip_csum(skb);
1255 break;
1256 case htons(ETH_P_IPV6):
1257 qede_gro_ipv6_csum(skb);
1258 break;
1259 default:
1260 DP_ERR(edev,
1261 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
1262 ntohs(skb->protocol));
1263 }
1264 }
Manish Chopra88f09bd2016-03-08 04:09:44 -05001265#endif
Manish Chopraee2fa8e2016-04-20 03:03:29 -04001266
1267send_skb:
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04001268 skb_record_rx_queue(skb, fp->rxq->rxq_id);
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001269 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
Manish Chopra55482ed2016-03-04 12:35:06 -05001270}
1271
1272static inline void qede_tpa_cont(struct qede_dev *edev,
1273 struct qede_rx_queue *rxq,
1274 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1275{
1276 int i;
1277
1278 for (i = 0; cqe->len_list[i]; i++)
1279 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
1280 le16_to_cpu(cqe->len_list[i]));
1281
1282 if (unlikely(i > 1))
1283 DP_ERR(edev,
1284 "Strange - TPA cont with more than a single len_list entry\n");
1285}
1286
1287static void qede_tpa_end(struct qede_dev *edev,
1288 struct qede_fastpath *fp,
1289 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1290{
1291 struct qede_rx_queue *rxq = fp->rxq;
1292 struct qede_agg_info *tpa_info;
1293 struct sk_buff *skb;
1294 int i;
1295
1296 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
1297 skb = tpa_info->skb;
1298
1299 for (i = 0; cqe->len_list[i]; i++)
1300 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
1301 le16_to_cpu(cqe->len_list[i]));
1302 if (unlikely(i > 1))
1303 DP_ERR(edev,
1304 "Strange - TPA emd with more than a single len_list entry\n");
1305
Mintz, Yuval01e23012016-11-29 16:47:00 +02001306 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
Manish Chopra55482ed2016-03-04 12:35:06 -05001307 goto err;
1308
1309 /* Sanity */
1310 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
1311 DP_ERR(edev,
1312 "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
1313 cqe->num_of_bds, tpa_info->frag_id);
1314 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
1315 DP_ERR(edev,
1316 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
1317 le16_to_cpu(cqe->total_packet_len), skb->len);
1318
1319 memcpy(skb->data,
Mintz, Yuval01e23012016-11-29 16:47:00 +02001320 page_address(tpa_info->buffer.data) +
1321 tpa_info->start_cqe_placement_offset +
1322 tpa_info->buffer.page_offset, tpa_info->start_cqe_bd_len);
Manish Chopra55482ed2016-03-04 12:35:06 -05001323
1324 /* Finalize the SKB */
1325 skb->protocol = eth_type_trans(skb, edev->ndev);
1326 skb->ip_summed = CHECKSUM_UNNECESSARY;
1327
1328 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
1329 * to skb_shinfo(skb)->gso_segs
1330 */
1331 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
1332
1333 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1334
Mintz, Yuval01e23012016-11-29 16:47:00 +02001335 tpa_info->state = QEDE_AGG_STATE_NONE;
Manish Chopra55482ed2016-03-04 12:35:06 -05001336
1337 return;
1338err:
Mintz, Yuval01e23012016-11-29 16:47:00 +02001339 tpa_info->state = QEDE_AGG_STATE_NONE;
Manish Chopra55482ed2016-03-04 12:35:06 -05001340 dev_kfree_skb_any(tpa_info->skb);
1341 tpa_info->skb = NULL;
1342}
1343
Manish Chopra14db81d2016-04-14 01:38:33 -04001344static bool qede_tunn_exist(u16 flag)
1345{
1346 return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
1347 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
1348}
1349
1350static u8 qede_check_tunn_csum(u16 flag)
1351{
1352 u16 csum_flag = 0;
1353 u8 tcsum = 0;
1354
1355 if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
1356 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
1357 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
1358 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
1359
1360 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1361 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
1362 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1363 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1364 tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
1365 }
1366
1367 csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
1368 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
1369 PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1370 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1371
1372 if (csum_flag & flag)
1373 return QEDE_CSUM_ERROR;
1374
1375 return QEDE_CSUM_UNNECESSARY | tcsum;
1376}
1377
1378static u8 qede_check_notunn_csum(u16 flag)
Yuval Mintz29502192015-10-26 11:02:29 +02001379{
1380 u16 csum_flag = 0;
1381 u8 csum = 0;
1382
Manish Chopra14db81d2016-04-14 01:38:33 -04001383 if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1384 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
Yuval Mintz29502192015-10-26 11:02:29 +02001385 csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1386 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1387 csum = QEDE_CSUM_UNNECESSARY;
1388 }
1389
1390 csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1391 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1392
1393 if (csum_flag & flag)
1394 return QEDE_CSUM_ERROR;
1395
1396 return csum;
1397}
1398
Manish Chopra14db81d2016-04-14 01:38:33 -04001399static u8 qede_check_csum(u16 flag)
1400{
1401 if (!qede_tunn_exist(flag))
1402 return qede_check_notunn_csum(flag);
1403 else
1404 return qede_check_tunn_csum(flag);
1405}
1406
Manish Choprac72a6122016-06-30 02:35:18 -04001407static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
1408 u16 flag)
1409{
1410 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
1411
1412 if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
1413 ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
1414 (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1415 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
1416 return true;
1417
1418 return false;
1419}
1420
Mintz, Yuval496e0512016-11-29 16:47:09 +02001421/* Return true iff packet is to be passed to stack */
1422static bool qede_rx_xdp(struct qede_dev *edev,
1423 struct qede_fastpath *fp,
1424 struct qede_rx_queue *rxq,
1425 struct bpf_prog *prog,
1426 struct sw_rx_data *bd,
1427 struct eth_fast_path_rx_reg_cqe *cqe)
1428{
1429 u16 len = le16_to_cpu(cqe->len_on_first_bd);
1430 struct xdp_buff xdp;
1431 enum xdp_action act;
1432
1433 xdp.data = page_address(bd->data) + cqe->placement_offset;
1434 xdp.data_end = xdp.data + len;
1435 act = bpf_prog_run_xdp(prog, &xdp);
1436
1437 if (act == XDP_PASS)
1438 return true;
1439
1440 /* Count number of packets not to be passed to stack */
1441 rxq->xdp_no_pass++;
1442
1443 switch (act) {
1444 default:
1445 bpf_warn_invalid_xdp_action(act);
1446 case XDP_ABORTED:
1447 case XDP_DROP:
1448 qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1449 }
1450
1451 return false;
1452}
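
/* Illustrative sketch (not part of the driver): a minimal XDP program of the
 * kind qede_rx_xdp() above would run on every Rx buffer. It drops frames
 * shorter than 60 bytes and passes the rest to the stack, mirroring the
 * XDP_PASS / XDP_DROP handling in the switch above. Header paths and the
 * SEC() macro are assumed to come from a libbpf installation.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_runts(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* The verifier demands an explicit bounds check before any access */
	if (data + 60 > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";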
1453
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001454static struct sk_buff *qede_rx_allocate_skb(struct qede_dev *edev,
1455 struct qede_rx_queue *rxq,
1456 struct sw_rx_data *bd, u16 len,
1457 u16 pad)
1458{
1459 unsigned int offset = bd->page_offset;
1460 struct skb_frag_struct *frag;
1461 struct page *page = bd->data;
1462 unsigned int pull_len;
1463 struct sk_buff *skb;
1464 unsigned char *va;
1465
1466 /* Allocate a new SKB with a sufficiently large header len */
1467 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
1468 if (unlikely(!skb))
1469 return NULL;
1470
1471 /* Copy data into SKB - if it's small, we can simply copy it and
1472 * re-use the already allocated & mapped memory.
1473 */
1474 if (len + pad <= edev->rx_copybreak) {
1475 memcpy(skb_put(skb, len),
1476 page_address(page) + pad + offset, len);
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001477 qede_reuse_page(rxq, bd);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001478 goto out;
1479 }
1480
1481 frag = &skb_shinfo(skb)->frags[0];
1482
1483 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1484 page, pad + offset, len, rxq->rx_buf_seg_size);
1485
1486 va = skb_frag_address(frag);
1487 pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE);
1488
1489 /* Align the pull_len to optimize memcpy */
1490 memcpy(skb->data, va, ALIGN(pull_len, sizeof(long)));
1491
1492 /* Correct the skb & frag sizes offset after the pull */
1493 skb_frag_size_sub(frag, pull_len);
1494 frag->page_offset += pull_len;
1495 skb->data_len -= pull_len;
1496 skb->tail += pull_len;
1497
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001498 if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001499 /* Incr page ref count to reuse on allocation failure so
1500 * that it doesn't get freed while freeing SKB [as it's
1501 * already mapped there].
1502 */
1503 page_ref_inc(page);
1504 dev_kfree_skb_any(skb);
1505 return NULL;
1506 }
1507
1508out:
1509 /* We've consumed the first BD and prepared an SKB */
1510 qede_rx_bd_ring_consume(rxq);
1511 return skb;
1512}
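
/* Illustrative sketch (not part of the driver): the copybreak decision taken
 * at the top of qede_rx_allocate_skb() above. Frames that fit under the
 * threshold are memcpy()'d into the freshly allocated SKB so the Rx page can
 * be reused immediately; larger frames keep the page attached as a fragment.
 * The helper name is an assumption for the example; the driver's threshold
 * is edev->rx_copybreak (QEDE_RX_HDR_SIZE by default).
 */
static inline int rx_use_copybreak(unsigned int len, unsigned int pad,
				   unsigned int copybreak)
{
	/* Copying a small frame is cheaper than donating the whole page */
	return (len + pad) <= copybreak;
}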
1513
1514static int qede_rx_build_jumbo(struct qede_dev *edev,
1515 struct qede_rx_queue *rxq,
1516 struct sk_buff *skb,
1517 struct eth_fast_path_rx_reg_cqe *cqe,
1518 u16 first_bd_len)
1519{
1520 u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1521 struct sw_rx_data *bd;
1522 u16 bd_cons_idx;
1523 u8 num_frags;
1524
1525 pkt_len -= first_bd_len;
1526
1527 /* We've already used one BD for the SKB. Now take care of the rest */
1528 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1529 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1530 pkt_len;
1531
1532 if (unlikely(!cur_size)) {
1533 DP_ERR(edev,
1534 "Still got %d BDs for mapping jumbo, but length became 0\n",
1535 num_frags);
1536 goto out;
1537 }
1538
1539 /* We need a replacement buffer for each BD */
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001540 if (unlikely(qede_alloc_rx_buffer(rxq)))
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001541 goto out;
1542
1543 /* Now that we've allocated the replacement buffer,
1544 * we can safely consume the next BD and map it to the SKB.
1545 */
1546 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1547 bd = &rxq->sw_rx_ring[bd_cons_idx];
1548 qede_rx_bd_ring_consume(rxq);
1549
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001550 dma_unmap_page(rxq->dev, bd->mapping,
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001551 PAGE_SIZE, DMA_FROM_DEVICE);
1552
1553 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
1554 bd->data, 0, cur_size);
1555
1556 skb->truesize += PAGE_SIZE;
1557 skb->data_len += cur_size;
1558 skb->len += cur_size;
1559 pkt_len -= cur_size;
1560 }
1561
1562 if (unlikely(pkt_len))
1563 DP_ERR(edev,
1564 "Mapped all BDs of jumbo, but still have %d bytes\n",
1565 pkt_len);
1566
1567out:
1568 return num_frags;
1569}
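
/* Illustrative sketch (not part of the driver): how many buffer descriptors a
 * jumbo frame consumes in qede_rx_build_jumbo() above. The first BD carries
 * len_on_first_bd bytes and each further BD carries at most rx_buf_size
 * bytes; e.g. a 9000B frame with a 2048B first BD and 4096B buffers needs
 * 1 + ceil(6952 / 4096) = 3 BDs. The helper and the numbers are assumptions
 * for the example only.
 */
static inline unsigned int jumbo_bd_count(unsigned int pkt_len,
					  unsigned int first_bd_len,
					  unsigned int rx_buf_size)
{
	unsigned int rest = pkt_len > first_bd_len ? pkt_len - first_bd_len : 0;

	return 1 + (rest + rx_buf_size - 1) / rx_buf_size;
}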
1570
1571static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1572 struct qede_fastpath *fp,
1573 struct qede_rx_queue *rxq,
1574 union eth_rx_cqe *cqe,
1575 enum eth_rx_cqe_type type)
1576{
1577 switch (type) {
1578 case ETH_RX_CQE_TYPE_TPA_START:
1579 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1580 return 0;
1581 case ETH_RX_CQE_TYPE_TPA_CONT:
1582 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1583 return 0;
1584 case ETH_RX_CQE_TYPE_TPA_END:
1585 qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1586 return 1;
1587 default:
1588 return 0;
1589 }
1590}
1591
1592static int qede_rx_process_cqe(struct qede_dev *edev,
1593 struct qede_fastpath *fp,
1594 struct qede_rx_queue *rxq)
1595{
Mintz, Yuval496e0512016-11-29 16:47:09 +02001596 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001597 struct eth_fast_path_rx_reg_cqe *fp_cqe;
1598 u16 len, pad, bd_cons_idx, parse_flag;
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001599 enum eth_rx_cqe_type cqe_type;
1600 union eth_rx_cqe *cqe;
1601 struct sw_rx_data *bd;
1602 struct sk_buff *skb;
1603 __le16 flags;
1604 u8 csum_flag;
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001605
1606 /* Get the CQE from the completion ring */
1607 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1608 cqe_type = cqe->fast_path_regular.type;
1609
1610 /* Process an unlikely slowpath event */
1611 if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1612 struct eth_slow_path_rx_cqe *sp_cqe;
1613
1614 sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1615 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1616 return 0;
1617 }
1618
1619 /* Handle TPA cqes */
1620 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1621 return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1622
1623 /* Get the data from the SW ring; consume it only once it's evident
1624 * we won't recycle it.
1625 */
1626 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1627 bd = &rxq->sw_rx_ring[bd_cons_idx];
1628
1629 fp_cqe = &cqe->fast_path_regular;
1630 len = le16_to_cpu(fp_cqe->len_on_first_bd);
1631 pad = fp_cqe->placement_offset;
1632
Mintz, Yuval496e0512016-11-29 16:47:09 +02001633 /* Run eBPF program if one is attached */
1634 if (xdp_prog)
1635 if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe))
1636 return 1;
1637
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001638 /* If this is an error packet then drop it */
1639 flags = cqe->fast_path_regular.pars_flags.flags;
1640 parse_flag = le16_to_cpu(flags);
1641
1642 csum_flag = qede_check_csum(parse_flag);
1643 if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1644 if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag)) {
1645 rxq->rx_ip_frags++;
1646 } else {
1647 DP_NOTICE(edev,
1648 "CQE has error, flags = %x, dropping incoming packet\n",
1649 parse_flag);
1650 rxq->rx_hw_errors++;
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001651 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001652 return 0;
1653 }
1654 }
1655
1656 /* Basic validation passed; need to prepare an SKB. On success this also
1657 * guarantees that the first BD is finally consumed.
1658 */
1659 skb = qede_rx_allocate_skb(edev, rxq, bd, len, pad);
1660 if (!skb) {
1661 rxq->rx_alloc_errors++;
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001662 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001663 return 0;
1664 }
1665
1666 /* In case of a jumbo packet, several PAGE_SIZEd buffers will be pointed
1667 * to by a single cqe.
1668 */
1669 if (fp_cqe->bd_num > 1) {
1670 u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1671 fp_cqe, len);
1672
1673 if (unlikely(unmapped_frags > 0)) {
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001674 qede_recycle_rx_bd_ring(rxq, unmapped_frags);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001675 dev_kfree_skb_any(skb);
1676 return 0;
1677 }
1678 }
1679
1680 /* The SKB contains all the data. Now prepare meta-magic */
1681 skb->protocol = eth_type_trans(skb, edev->ndev);
Mintz, Yuval8a472532016-11-29 16:47:07 +02001682 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001683 qede_set_skb_csum(skb, csum_flag);
1684 skb_record_rx_queue(skb, rxq->rxq_id);
1685
1686 /* SKB is prepared - pass it to stack */
Mintz, Yuval9eb22352016-11-29 16:47:08 +02001687 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001688
1689 return 1;
1690}
1691
Yuval Mintz29502192015-10-26 11:02:29 +02001692static int qede_rx_int(struct qede_fastpath *fp, int budget)
1693{
Yuval Mintz29502192015-10-26 11:02:29 +02001694 struct qede_rx_queue *rxq = fp->rxq;
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001695 struct qede_dev *edev = fp->edev;
1696 u16 hw_comp_cons, sw_comp_cons;
1697 int work_done = 0;
Yuval Mintz29502192015-10-26 11:02:29 +02001698
1699 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1700 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1701
1702 /* Memory barrier to prevent the CPU from speculatively reading the
1703 * CQE/BD in the while-loop below before reading hw_comp_cons. If the CQE
1704 * were read before the FW wrote it (FW writes the CQE and then the SB),
1705 * the CPU could see the updated hw_comp_cons yet process a stale CQE.
1706 */
1707 rmb();
1708
1709 /* Loop to complete all indicated BDs */
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001710 while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
1711 qede_rx_process_cqe(edev, fp, rxq);
Yuval Mintz29502192015-10-26 11:02:29 +02001712 qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1713 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001714 work_done++;
1715 }
Yuval Mintz29502192015-10-26 11:02:29 +02001716
1717 /* Update producers */
1718 qede_update_rx_prod(edev, rxq);
1719
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001720 return work_done;
1721}
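
/* Illustrative sketch (not part of the driver): the ordering rule the rmb()
 * in qede_rx_int() enforces, expressed with C11 atomics for a generic
 * producer/consumer ring. The producer publishes the entry before the index
 * (release); the consumer reads the index before the entry (acquire). This
 * is only an analogy -- the driver synchronizes against DMA-writing firmware,
 * not another CPU thread, and therefore uses the kernel's rmb() instead.
 */
#include <stdatomic.h>

struct demo_ring {
	unsigned int entries[256];
	atomic_uint prod;			/* producer index */
};

static void demo_produce(struct demo_ring *r, unsigned int idx, unsigned int val)
{
	r->entries[idx & 255] = val;
	/* Make the entry visible before advancing the index */
	atomic_store_explicit(&r->prod, idx + 1, memory_order_release);
}

static int demo_consume(struct demo_ring *r, unsigned int cons, unsigned int *val)
{
	/* Pairs with the release above; the entry read cannot be hoisted */
	if (cons == atomic_load_explicit(&r->prod, memory_order_acquire))
		return 0;	/* nothing new to process */

	*val = r->entries[cons & 255];
	return 1;
}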
Sudarsana Reddy Kalluru68db9ec2016-08-16 10:51:02 -04001722
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001723static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1724{
1725 qed_sb_update_sb_idx(fp->sb_info);
1726
1727 /* *_has_*_work() reads the status block, thus we need to ensure that
1728 * status block indices have been actually read (qed_sb_update_sb_idx)
1729 * prior to this check (*_has_*_work) so that we won't write the
1730 * "newer" value of the status block to HW (if there was a DMA right
1731 * after qede_has_rx_work and if there is no rmb, the memory reading
1732 * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
1733 * In this case there will never be another interrupt until there is
1734 * another update of the status block, while there is still unhandled
1735 * work.
1736 */
1737 rmb();
1738
1739 if (likely(fp->type & QEDE_FASTPATH_RX))
1740 if (qede_has_rx_work(fp->rxq))
1741 return true;
1742
1743 if (likely(fp->type & QEDE_FASTPATH_TX))
1744 if (qede_txq_has_work(fp->txq))
1745 return true;
1746
1747 return false;
Yuval Mintz29502192015-10-26 11:02:29 +02001748}
1749
1750static int qede_poll(struct napi_struct *napi, int budget)
1751{
Yuval Mintz29502192015-10-26 11:02:29 +02001752 struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
Manish Choprac7741692016-06-30 02:35:19 -04001753 napi);
Yuval Mintz29502192015-10-26 11:02:29 +02001754 struct qede_dev *edev = fp->edev;
Manish Choprac7741692016-06-30 02:35:19 -04001755 int rx_work_done = 0;
Yuval Mintz29502192015-10-26 11:02:29 +02001756
Mintz, Yuval80439a12016-11-29 16:47:02 +02001757 if (likely(fp->type & QEDE_FASTPATH_TX) && qede_txq_has_work(fp->txq))
1758 qede_tx_int(edev, fp->txq);
Yuval Mintz29502192015-10-26 11:02:29 +02001759
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04001760 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1761 qede_has_rx_work(fp->rxq)) ?
Manish Choprac7741692016-06-30 02:35:19 -04001762 qede_rx_int(fp, budget) : 0;
1763 if (rx_work_done < budget) {
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001764 if (!qede_poll_is_more_work(fp)) {
Manish Choprac7741692016-06-30 02:35:19 -04001765 napi_complete(napi);
Yuval Mintz29502192015-10-26 11:02:29 +02001766
Manish Choprac7741692016-06-30 02:35:19 -04001767 /* Update and reenable interrupts */
Mintz, Yuvalf4fad342016-11-29 16:47:04 +02001768 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
Manish Choprac7741692016-06-30 02:35:19 -04001769 } else {
1770 rx_work_done = budget;
Yuval Mintz29502192015-10-26 11:02:29 +02001771 }
1772 }
1773
Manish Choprac7741692016-06-30 02:35:19 -04001774 return rx_work_done;
Yuval Mintz29502192015-10-26 11:02:29 +02001775}
1776
1777static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1778{
1779 struct qede_fastpath *fp = fp_cookie;
1780
1781 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1782
1783 napi_schedule_irqoff(&fp->napi);
1784 return IRQ_HANDLED;
1785}
1786
1787/* -------------------------------------------------------------------------
1788 * END OF FAST-PATH
1789 * -------------------------------------------------------------------------
1790 */
1791
1792static int qede_open(struct net_device *ndev);
1793static int qede_close(struct net_device *ndev);
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02001794static int qede_set_mac_addr(struct net_device *ndev, void *p);
1795static void qede_set_rx_mode(struct net_device *ndev);
1796static void qede_config_rx_mode(struct net_device *ndev);
1797
1798static int qede_set_ucast_rx_mac(struct qede_dev *edev,
1799 enum qed_filter_xcast_params_type opcode,
1800 unsigned char mac[ETH_ALEN])
1801{
1802 struct qed_filter_params filter_cmd;
1803
1804 memset(&filter_cmd, 0, sizeof(filter_cmd));
1805 filter_cmd.type = QED_FILTER_TYPE_UCAST;
1806 filter_cmd.filter.ucast.type = opcode;
1807 filter_cmd.filter.ucast.mac_valid = 1;
1808 ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
1809
1810 return edev->ops->filter_config(edev->cdev, &filter_cmd);
1811}
1812
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02001813static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
1814 enum qed_filter_xcast_params_type opcode,
1815 u16 vid)
1816{
1817 struct qed_filter_params filter_cmd;
1818
1819 memset(&filter_cmd, 0, sizeof(filter_cmd));
1820 filter_cmd.type = QED_FILTER_TYPE_UCAST;
1821 filter_cmd.filter.ucast.type = opcode;
1822 filter_cmd.filter.ucast.vlan_valid = 1;
1823 filter_cmd.filter.ucast.vlan = vid;
1824
1825 return edev->ops->filter_config(edev->cdev, &filter_cmd);
1826}
1827
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02001828void qede_fill_by_demand_stats(struct qede_dev *edev)
1829{
1830 struct qed_eth_stats stats;
1831
1832 edev->ops->get_vport_stats(edev->cdev, &stats);
1833 edev->stats.no_buff_discards = stats.no_buff_discards;
Sudarsana Reddy Kalluru1a5a3662016-08-16 10:51:01 -04001834 edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
1835 edev->stats.ttl0_discard = stats.ttl0_discard;
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02001836 edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
1837 edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
1838 edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
1839 edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
1840 edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
1841 edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
1842 edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
1843 edev->stats.mac_filter_discards = stats.mac_filter_discards;
1844
1845 edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
1846 edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
1847 edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
1848 edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
1849 edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
1850 edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
1851 edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
1852 edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
1853 edev->stats.coalesced_events = stats.tpa_coalesced_events;
1854 edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
1855 edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
1856 edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
1857
1858 edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
Yuval Mintzd4967cf2016-04-22 08:41:01 +03001859 edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
1860 edev->stats.rx_128_to_255_byte_packets =
1861 stats.rx_128_to_255_byte_packets;
1862 edev->stats.rx_256_to_511_byte_packets =
1863 stats.rx_256_to_511_byte_packets;
1864 edev->stats.rx_512_to_1023_byte_packets =
1865 stats.rx_512_to_1023_byte_packets;
1866 edev->stats.rx_1024_to_1518_byte_packets =
1867 stats.rx_1024_to_1518_byte_packets;
1868 edev->stats.rx_1519_to_1522_byte_packets =
1869 stats.rx_1519_to_1522_byte_packets;
1870 edev->stats.rx_1519_to_2047_byte_packets =
1871 stats.rx_1519_to_2047_byte_packets;
1872 edev->stats.rx_2048_to_4095_byte_packets =
1873 stats.rx_2048_to_4095_byte_packets;
1874 edev->stats.rx_4096_to_9216_byte_packets =
1875 stats.rx_4096_to_9216_byte_packets;
1876 edev->stats.rx_9217_to_16383_byte_packets =
1877 stats.rx_9217_to_16383_byte_packets;
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02001878 edev->stats.rx_crc_errors = stats.rx_crc_errors;
1879 edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
1880 edev->stats.rx_pause_frames = stats.rx_pause_frames;
1881 edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
1882 edev->stats.rx_align_errors = stats.rx_align_errors;
1883 edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
1884 edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
1885 edev->stats.rx_jabbers = stats.rx_jabbers;
1886 edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
1887 edev->stats.rx_fragments = stats.rx_fragments;
1888 edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
1889 edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
1890 edev->stats.tx_128_to_255_byte_packets =
1891 stats.tx_128_to_255_byte_packets;
1892 edev->stats.tx_256_to_511_byte_packets =
1893 stats.tx_256_to_511_byte_packets;
1894 edev->stats.tx_512_to_1023_byte_packets =
1895 stats.tx_512_to_1023_byte_packets;
1896 edev->stats.tx_1024_to_1518_byte_packets =
1897 stats.tx_1024_to_1518_byte_packets;
1898 edev->stats.tx_1519_to_2047_byte_packets =
1899 stats.tx_1519_to_2047_byte_packets;
1900 edev->stats.tx_2048_to_4095_byte_packets =
1901 stats.tx_2048_to_4095_byte_packets;
1902 edev->stats.tx_4096_to_9216_byte_packets =
1903 stats.tx_4096_to_9216_byte_packets;
1904 edev->stats.tx_9217_to_16383_byte_packets =
1905 stats.tx_9217_to_16383_byte_packets;
1906 edev->stats.tx_pause_frames = stats.tx_pause_frames;
1907 edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
1908 edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
1909 edev->stats.tx_total_collisions = stats.tx_total_collisions;
1910 edev->stats.brb_truncates = stats.brb_truncates;
1911 edev->stats.brb_discards = stats.brb_discards;
1912 edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
1913}
1914
Yuval Mintz1a635e42016-08-15 10:42:43 +03001915static
1916struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
1917 struct rtnl_link_stats64 *stats)
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02001918{
1919 struct qede_dev *edev = netdev_priv(dev);
1920
1921 qede_fill_by_demand_stats(edev);
1922
1923 stats->rx_packets = edev->stats.rx_ucast_pkts +
1924 edev->stats.rx_mcast_pkts +
1925 edev->stats.rx_bcast_pkts;
1926 stats->tx_packets = edev->stats.tx_ucast_pkts +
1927 edev->stats.tx_mcast_pkts +
1928 edev->stats.tx_bcast_pkts;
1929
1930 stats->rx_bytes = edev->stats.rx_ucast_bytes +
1931 edev->stats.rx_mcast_bytes +
1932 edev->stats.rx_bcast_bytes;
1933
1934 stats->tx_bytes = edev->stats.tx_ucast_bytes +
1935 edev->stats.tx_mcast_bytes +
1936 edev->stats.tx_bcast_bytes;
1937
1938 stats->tx_errors = edev->stats.tx_err_drop_pkts;
1939 stats->multicast = edev->stats.rx_mcast_pkts +
1940 edev->stats.rx_bcast_pkts;
1941
1942 stats->rx_fifo_errors = edev->stats.no_buff_discards;
1943
1944 stats->collisions = edev->stats.tx_total_collisions;
1945 stats->rx_crc_errors = edev->stats.rx_crc_errors;
1946 stats->rx_frame_errors = edev->stats.rx_align_errors;
1947
1948 return stats;
1949}
1950
Yuval Mintz733def62016-05-11 16:36:22 +03001951#ifdef CONFIG_QED_SRIOV
Yuval Mintz73390ac2016-05-11 16:36:24 +03001952static int qede_get_vf_config(struct net_device *dev, int vfidx,
1953 struct ifla_vf_info *ivi)
1954{
1955 struct qede_dev *edev = netdev_priv(dev);
1956
1957 if (!edev->ops)
1958 return -EINVAL;
1959
1960 return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
1961}
1962
Yuval Mintz733def62016-05-11 16:36:22 +03001963static int qede_set_vf_rate(struct net_device *dev, int vfidx,
1964 int min_tx_rate, int max_tx_rate)
1965{
1966 struct qede_dev *edev = netdev_priv(dev);
1967
Yuval Mintzbe7b6d62016-05-26 11:01:17 +03001968 return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
Yuval Mintz733def62016-05-11 16:36:22 +03001969 max_tx_rate);
1970}
1971
Yuval Mintz6ddc7602016-05-11 16:36:23 +03001972static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
1973{
1974 struct qede_dev *edev = netdev_priv(dev);
1975
1976 if (!edev->ops)
1977 return -EINVAL;
1978
1979 return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
1980}
1981
Yuval Mintz733def62016-05-11 16:36:22 +03001982static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
1983 int link_state)
1984{
1985 struct qede_dev *edev = netdev_priv(dev);
1986
1987 if (!edev->ops)
1988 return -EINVAL;
1989
1990 return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
1991}
1992#endif
1993
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02001994static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
1995{
1996 struct qed_update_vport_params params;
1997 int rc;
1998
1999 /* Proceed only if action actually needs to be performed */
2000 if (edev->accept_any_vlan == action)
2001 return;
2002
2003 memset(&params, 0, sizeof(params));
2004
2005 params.vport_id = 0;
2006 params.accept_any_vlan = action;
2007 params.update_accept_any_vlan_flg = 1;
2008
2009 rc = edev->ops->vport_update(edev->cdev, &params);
2010 if (rc) {
2011 DP_ERR(edev, "Failed to %s accept-any-vlan\n",
2012 action ? "enable" : "disable");
2013 } else {
2014 DP_INFO(edev, "%s accept-any-vlan\n",
2015 action ? "enabled" : "disabled");
2016 edev->accept_any_vlan = action;
2017 }
2018}
2019
2020static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
2021{
2022 struct qede_dev *edev = netdev_priv(dev);
2023 struct qede_vlan *vlan, *tmp;
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002024 int rc = 0;
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002025
2026 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid);
2027
2028 vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
2029 if (!vlan) {
2030 DP_INFO(edev, "Failed to allocate struct for vlan\n");
2031 return -ENOMEM;
2032 }
2033 INIT_LIST_HEAD(&vlan->list);
2034 vlan->vid = vid;
2035 vlan->configured = false;
2036
2037 /* Verify vlan isn't already configured */
2038 list_for_each_entry(tmp, &edev->vlan_list, list) {
2039 if (tmp->vid == vlan->vid) {
2040 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
2041 "vlan already configured\n");
2042 kfree(vlan);
2043 return -EEXIST;
2044 }
2045 }
2046
2047 /* If interface is down, cache this VLAN ID and return */
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002048 __qede_lock(edev);
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002049 if (edev->state != QEDE_STATE_OPEN) {
2050 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
2051 "Interface is down, VLAN %d will be configured when interface is up\n",
2052 vid);
2053 if (vid != 0)
2054 edev->non_configured_vlans++;
2055 list_add(&vlan->list, &edev->vlan_list);
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002056 goto out;
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002057 }
2058
2059 /* Check for the filter limit.
2060 * Note - vlan0 has a reserved filter and can be added without
2061 * worrying about quota
2062 */
2063 if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) ||
2064 (vlan->vid == 0)) {
2065 rc = qede_set_ucast_rx_vlan(edev,
2066 QED_FILTER_XCAST_TYPE_ADD,
2067 vlan->vid);
2068 if (rc) {
2069 DP_ERR(edev, "Failed to configure VLAN %d\n",
2070 vlan->vid);
2071 kfree(vlan);
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002072 goto out;
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002073 }
2074 vlan->configured = true;
2075
2076 /* vlan0 filter isn't consuming out of our quota */
2077 if (vlan->vid != 0)
2078 edev->configured_vlans++;
2079 } else {
2080 /* Out of quota; Activate accept-any-VLAN mode */
2081 if (!edev->non_configured_vlans)
2082 qede_config_accept_any_vlan(edev, true);
2083
2084 edev->non_configured_vlans++;
2085 }
2086
2087 list_add(&vlan->list, &edev->vlan_list);
2088
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002089out:
2090 __qede_unlock(edev);
2091 return rc;
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002092}
2093
2094static void qede_del_vlan_from_list(struct qede_dev *edev,
2095 struct qede_vlan *vlan)
2096{
2097 /* vlan0 filter isn't consuming out of our quota */
2098 if (vlan->vid != 0) {
2099 if (vlan->configured)
2100 edev->configured_vlans--;
2101 else
2102 edev->non_configured_vlans--;
2103 }
2104
2105 list_del(&vlan->list);
2106 kfree(vlan);
2107}
2108
2109static int qede_configure_vlan_filters(struct qede_dev *edev)
2110{
2111 int rc = 0, real_rc = 0, accept_any_vlan = 0;
2112 struct qed_dev_eth_info *dev_info;
2113 struct qede_vlan *vlan = NULL;
2114
2115 if (list_empty(&edev->vlan_list))
2116 return 0;
2117
2118 dev_info = &edev->dev_info;
2119
2120 /* Configure non-configured vlans */
2121 list_for_each_entry(vlan, &edev->vlan_list, list) {
2122 if (vlan->configured)
2123 continue;
2124
2125 /* We have used all our credits, now enable accept_any_vlan */
2126 if ((vlan->vid != 0) &&
2127 (edev->configured_vlans == dev_info->num_vlan_filters)) {
2128 accept_any_vlan = 1;
2129 continue;
2130 }
2131
2132 DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid);
2133
2134 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD,
2135 vlan->vid);
2136 if (rc) {
2137 DP_ERR(edev, "Failed to configure VLAN %u\n",
2138 vlan->vid);
2139 real_rc = rc;
2140 continue;
2141 }
2142
2143 vlan->configured = true;
2144 /* vlan0 filter doesn't consume our VLAN filter's quota */
2145 if (vlan->vid != 0) {
2146 edev->non_configured_vlans--;
2147 edev->configured_vlans++;
2148 }
2149 }
2150
2151 /* enable accept_any_vlan mode if we have more VLANs than credits,
2152 * or remove accept_any_vlan mode if we've actually removed
2153 * a non-configured vlan, and all remaining vlans are truly configured.
2154 */
2155
2156 if (accept_any_vlan)
2157 qede_config_accept_any_vlan(edev, true);
2158 else if (!edev->non_configured_vlans)
2159 qede_config_accept_any_vlan(edev, false);
2160
2161 return real_rc;
2162}
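
/* Illustrative sketch (not part of the driver): the VLAN filter quota rule
 * used by qede_vlan_rx_add_vid() and qede_configure_vlan_filters() above.
 * VLAN 0 always has a reserved filter; any other VID consumes one hardware
 * filter credit, and once the credits run out the device falls back to
 * accept-any-vlan mode. The structure and helper below are simplifications
 * for the example only.
 */
#include <stdint.h>

struct vlan_quota_demo {
	unsigned int configured;	/* non-zero VIDs holding a HW filter */
	unsigned int credits;		/* dev_info.num_vlan_filters */
};

/* vlan0 never consumes a credit; any other VID needs a free one */
static int vlan_can_use_hw_filter(const struct vlan_quota_demo *q, uint16_t vid)
{
	return vid == 0 || q->configured < q->credits;
}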
2163
2164static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2165{
2166 struct qede_dev *edev = netdev_priv(dev);
2167 struct qede_vlan *vlan = NULL;
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002168 int rc = 0;
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002169
2170 DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid);
2171
2172 /* Find whether entry exists */
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002173 __qede_lock(edev);
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002174 list_for_each_entry(vlan, &edev->vlan_list, list)
2175 if (vlan->vid == vid)
2176 break;
2177
2178 if (!vlan || (vlan->vid != vid)) {
2179 DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
2180 "Vlan isn't configured\n");
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002181 goto out;
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002182 }
2183
2184 if (edev->state != QEDE_STATE_OPEN) {
2185 /* As interface is already down, we don't have a VPORT
2186 * instance to remove the vlan filter from, so just update the vlan list
2187 */
2188 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
2189 "Interface is down, removing VLAN from list only\n");
2190 qede_del_vlan_from_list(edev, vlan);
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002191 goto out;
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002192 }
2193
2194 /* Remove vlan */
Yuval Mintzc524e2f52016-07-27 14:45:19 +03002195 if (vlan->configured) {
2196 rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
2197 vid);
2198 if (rc) {
2199 DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002200 goto out;
Yuval Mintzc524e2f52016-07-27 14:45:19 +03002201 }
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002202 }
2203
2204 qede_del_vlan_from_list(edev, vlan);
2205
2206 /* We have removed a VLAN - try to see if we can
2207 * configure non-configured VLAN from the list.
2208 */
2209 rc = qede_configure_vlan_filters(edev);
2210
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002211out:
2212 __qede_unlock(edev);
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002213 return rc;
2214}
2215
2216static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
2217{
2218 struct qede_vlan *vlan = NULL;
2219
2220 if (list_empty(&edev->vlan_list))
2221 return;
2222
2223 list_for_each_entry(vlan, &edev->vlan_list, list) {
2224 if (!vlan->configured)
2225 continue;
2226
2227 vlan->configured = false;
2228
2229 /* vlan0 filter isn't consuming out of our quota */
2230 if (vlan->vid != 0) {
2231 edev->non_configured_vlans++;
2232 edev->configured_vlans--;
2233 }
2234
2235 DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
Yuval Mintz1a635e42016-08-15 10:42:43 +03002236 "marked vlan %d as non-configured\n", vlan->vid);
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002237 }
2238
2239 edev->accept_any_vlan = false;
2240}
2241
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002242static void qede_set_features_reload(struct qede_dev *edev,
2243 struct qede_reload_args *args)
2244{
2245 edev->ndev->features = args->u.features;
2246}
2247
2248int qede_set_features(struct net_device *dev, netdev_features_t features)
Yuval Mintzce2b8852016-05-26 11:01:18 +03002249{
2250 struct qede_dev *edev = netdev_priv(dev);
2251 netdev_features_t changes = features ^ dev->features;
2252 bool need_reload = false;
2253
2254 /* No action needed if hardware GRO is disabled during driver load */
2255 if (changes & NETIF_F_GRO) {
2256 if (dev->features & NETIF_F_GRO)
2257 need_reload = !edev->gro_disable;
2258 else
2259 need_reload = edev->gro_disable;
2260 }
2261
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002262 if (need_reload) {
2263 struct qede_reload_args args;
2264
2265 args.u.features = features;
2266 args.func = &qede_set_features_reload;
2267
Mintz, Yuval496e0512016-11-29 16:47:09 +02002268 /* Make sure that we definitely need to reload.
2269 * In case of an eBPF attached program, there will be no FW
2270 * aggregations, so no need to actually reload.
2271 */
2272 __qede_lock(edev);
2273 if (edev->xdp_prog)
2274 args.func(edev, &args);
2275 else
2276 qede_reload(edev, &args, true);
2277 __qede_unlock(edev);
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002278
Yuval Mintzce2b8852016-05-26 11:01:18 +03002279 return 1;
2280 }
2281
2282 return 0;
2283}
2284
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002285static void qede_udp_tunnel_add(struct net_device *dev,
2286 struct udp_tunnel_info *ti)
Manish Choprab18e1702016-04-14 01:38:30 -04002287{
2288 struct qede_dev *edev = netdev_priv(dev);
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002289 u16 t_port = ntohs(ti->port);
Manish Choprab18e1702016-04-14 01:38:30 -04002290
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002291 switch (ti->type) {
2292 case UDP_TUNNEL_TYPE_VXLAN:
2293 if (edev->vxlan_dst_port)
2294 return;
2295
2296 edev->vxlan_dst_port = t_port;
2297
Yuval Mintz525ef5c2016-08-15 10:42:45 +03002298 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002299 t_port);
2300
2301 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
2302 break;
2303 case UDP_TUNNEL_TYPE_GENEVE:
2304 if (edev->geneve_dst_port)
2305 return;
2306
2307 edev->geneve_dst_port = t_port;
2308
Yuval Mintz525ef5c2016-08-15 10:42:45 +03002309 DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002310 t_port);
2311 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
2312 break;
2313 default:
Manish Choprab18e1702016-04-14 01:38:30 -04002314 return;
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002315 }
Manish Choprab18e1702016-04-14 01:38:30 -04002316
Manish Choprab18e1702016-04-14 01:38:30 -04002317 schedule_delayed_work(&edev->sp_task, 0);
2318}
2319
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002320static void qede_udp_tunnel_del(struct net_device *dev,
2321 struct udp_tunnel_info *ti)
Manish Choprab18e1702016-04-14 01:38:30 -04002322{
2323 struct qede_dev *edev = netdev_priv(dev);
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002324 u16 t_port = ntohs(ti->port);
Manish Choprab18e1702016-04-14 01:38:30 -04002325
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002326 switch (ti->type) {
2327 case UDP_TUNNEL_TYPE_VXLAN:
2328 if (t_port != edev->vxlan_dst_port)
2329 return;
2330
2331 edev->vxlan_dst_port = 0;
2332
Yuval Mintz525ef5c2016-08-15 10:42:45 +03002333 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002334 t_port);
2335
2336 set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
2337 break;
2338 case UDP_TUNNEL_TYPE_GENEVE:
2339 if (t_port != edev->geneve_dst_port)
2340 return;
2341
2342 edev->geneve_dst_port = 0;
2343
Yuval Mintz525ef5c2016-08-15 10:42:45 +03002344 DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002345 t_port);
2346 set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
2347 break;
2348 default:
Manish Choprab18e1702016-04-14 01:38:30 -04002349 return;
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002350 }
Manish Choprab18e1702016-04-14 01:38:30 -04002351
Manish Choprab18e1702016-04-14 01:38:30 -04002352 schedule_delayed_work(&edev->sp_task, 0);
2353}
Manish Chopra9a109dd2016-04-14 01:38:31 -04002354
Manish Chopra25695852016-10-14 05:19:19 -04002355/* 8B udp header + 8B base tunnel header + 32B option length */
2356#define QEDE_MAX_TUN_HDR_LEN 48
2357
2358static netdev_features_t qede_features_check(struct sk_buff *skb,
2359 struct net_device *dev,
2360 netdev_features_t features)
2361{
2362 if (skb->encapsulation) {
2363 u8 l4_proto = 0;
2364
2365 switch (vlan_get_protocol(skb)) {
2366 case htons(ETH_P_IP):
2367 l4_proto = ip_hdr(skb)->protocol;
2368 break;
2369 case htons(ETH_P_IPV6):
2370 l4_proto = ipv6_hdr(skb)->nexthdr;
2371 break;
2372 default:
2373 return features;
2374 }
2375
2376 /* Disable offloads for geneve tunnels, as HW can't parse
2377 * a geneve header whose option length is greater than 32B.
2378 */
2379 if ((l4_proto == IPPROTO_UDP) &&
2380 ((skb_inner_mac_header(skb) -
2381 skb_transport_header(skb)) > QEDE_MAX_TUN_HDR_LEN))
2382 return features & ~(NETIF_F_CSUM_MASK |
2383 NETIF_F_GSO_MASK);
2384 }
2385
2386 return features;
2387}
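
/* Illustrative sketch (not part of the driver): the length check behind
 * QEDE_MAX_TUN_HDR_LEN in qede_features_check() above. For an encapsulated
 * frame, skb_inner_mac_header() - skb_transport_header() spans the UDP
 * header (8B) plus the tunnel header, so a GENEVE header carrying more than
 * 32B of options exceeds 8 + 8 + 32 = 48 bytes and the checksum/GSO offloads
 * are stripped. The helper below is an assumption for the example only.
 */
static int tunnel_offload_ok_demo(unsigned int udp_hdr_len,
				  unsigned int tun_base_len,
				  unsigned int tun_opt_len)
{
	/* 48 mirrors QEDE_MAX_TUN_HDR_LEN defined above */
	return (udp_hdr_len + tun_base_len + tun_opt_len) <= 48;
}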
2388
Mintz, Yuval496e0512016-11-29 16:47:09 +02002389static void qede_xdp_reload_func(struct qede_dev *edev,
2390 struct qede_reload_args *args)
2391{
2392 struct bpf_prog *old;
2393
2394 old = xchg(&edev->xdp_prog, args->u.new_prog);
2395 if (old)
2396 bpf_prog_put(old);
2397}
2398
2399static int qede_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
2400{
2401 struct qede_reload_args args;
2402
2403 /* If we're called, there was already a bpf reference increment */
2404 args.func = &qede_xdp_reload_func;
2405 args.u.new_prog = prog;
2406 qede_reload(edev, &args, false);
2407
2408 return 0;
2409}
2410
2411static int qede_xdp(struct net_device *dev, struct netdev_xdp *xdp)
2412{
2413 struct qede_dev *edev = netdev_priv(dev);
2414
2415 switch (xdp->command) {
2416 case XDP_SETUP_PROG:
2417 return qede_xdp_set(edev, xdp->prog);
2418 case XDP_QUERY_PROG:
2419 xdp->prog_attached = !!edev->xdp_prog;
2420 return 0;
2421 default:
2422 return -EINVAL;
2423 }
2424}
2425
Yuval Mintz29502192015-10-26 11:02:29 +02002426static const struct net_device_ops qede_netdev_ops = {
2427 .ndo_open = qede_open,
2428 .ndo_stop = qede_close,
2429 .ndo_start_xmit = qede_start_xmit,
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002430 .ndo_set_rx_mode = qede_set_rx_mode,
2431 .ndo_set_mac_address = qede_set_mac_addr,
Yuval Mintz29502192015-10-26 11:02:29 +02002432 .ndo_validate_addr = eth_validate_addr,
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02002433 .ndo_change_mtu = qede_change_mtu,
Yuval Mintz08feecd2016-05-11 16:36:20 +03002434#ifdef CONFIG_QED_SRIOV
Yuval Mintzeff16962016-05-11 16:36:21 +03002435 .ndo_set_vf_mac = qede_set_vf_mac,
Yuval Mintz08feecd2016-05-11 16:36:20 +03002436 .ndo_set_vf_vlan = qede_set_vf_vlan,
2437#endif
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002438 .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
2439 .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
Yuval Mintzce2b8852016-05-26 11:01:18 +03002440 .ndo_set_features = qede_set_features,
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02002441 .ndo_get_stats64 = qede_get_stats64,
Yuval Mintz733def62016-05-11 16:36:22 +03002442#ifdef CONFIG_QED_SRIOV
2443 .ndo_set_vf_link_state = qede_set_vf_link_state,
Yuval Mintz6ddc7602016-05-11 16:36:23 +03002444 .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
Yuval Mintz73390ac2016-05-11 16:36:24 +03002445 .ndo_get_vf_config = qede_get_vf_config,
Yuval Mintz733def62016-05-11 16:36:22 +03002446 .ndo_set_vf_rate = qede_set_vf_rate,
2447#endif
Alexander Duyckf9f082a2016-06-16 12:22:57 -07002448 .ndo_udp_tunnel_add = qede_udp_tunnel_add,
2449 .ndo_udp_tunnel_del = qede_udp_tunnel_del,
Manish Chopra25695852016-10-14 05:19:19 -04002450 .ndo_features_check = qede_features_check,
Mintz, Yuval496e0512016-11-29 16:47:09 +02002451 .ndo_xdp = qede_xdp,
Yuval Mintz29502192015-10-26 11:02:29 +02002452};
2453
2454/* -------------------------------------------------------------------------
Yuval Mintze712d522015-10-26 11:02:27 +02002455 * START OF PROBE / REMOVE
2456 * -------------------------------------------------------------------------
2457 */
2458
2459static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
2460 struct pci_dev *pdev,
2461 struct qed_dev_eth_info *info,
Yuval Mintz1a635e42016-08-15 10:42:43 +03002462 u32 dp_module, u8 dp_level)
Yuval Mintze712d522015-10-26 11:02:27 +02002463{
2464 struct net_device *ndev;
2465 struct qede_dev *edev;
2466
2467 ndev = alloc_etherdev_mqs(sizeof(*edev),
Yuval Mintz1a635e42016-08-15 10:42:43 +03002468 info->num_queues, info->num_queues);
Yuval Mintze712d522015-10-26 11:02:27 +02002469 if (!ndev) {
2470 pr_err("etherdev allocation failed\n");
2471 return NULL;
2472 }
2473
2474 edev = netdev_priv(ndev);
2475 edev->ndev = ndev;
2476 edev->cdev = cdev;
2477 edev->pdev = pdev;
2478 edev->dp_module = dp_module;
2479 edev->dp_level = dp_level;
2480 edev->ops = qed_ops;
Yuval Mintz29502192015-10-26 11:02:29 +02002481 edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
2482 edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
Yuval Mintze712d522015-10-26 11:02:27 +02002483
Yuval Mintz525ef5c2016-08-15 10:42:45 +03002484 DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
2485 info->num_queues, info->num_queues);
2486
Yuval Mintze712d522015-10-26 11:02:27 +02002487 SET_NETDEV_DEV(ndev, &pdev->dev);
2488
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02002489 memset(&edev->stats, 0, sizeof(edev->stats));
Yuval Mintze712d522015-10-26 11:02:27 +02002490 memcpy(&edev->dev_info, info, sizeof(*info));
2491
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002492 INIT_LIST_HEAD(&edev->vlan_list);
2493
Yuval Mintze712d522015-10-26 11:02:27 +02002494 return edev;
2495}
2496
2497static void qede_init_ndev(struct qede_dev *edev)
2498{
2499 struct net_device *ndev = edev->ndev;
2500 struct pci_dev *pdev = edev->pdev;
2501 u32 hw_features;
2502
2503 pci_set_drvdata(pdev, ndev);
2504
2505 ndev->mem_start = edev->dev_info.common.pci_mem_start;
2506 ndev->base_addr = ndev->mem_start;
2507 ndev->mem_end = edev->dev_info.common.pci_mem_end;
2508 ndev->irq = edev->dev_info.common.pci_irq;
2509
2510 ndev->watchdog_timeo = TX_TIMEOUT;
2511
Yuval Mintz29502192015-10-26 11:02:29 +02002512 ndev->netdev_ops = &qede_netdev_ops;
2513
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02002514 qede_set_ethtool_ops(ndev);
2515
Mintz, Yuval0183eb12016-10-31 22:26:53 +02002516 ndev->priv_flags |= IFF_UNICAST_FLT;
Yuval Mintz7b7e70f2016-10-14 05:19:20 -04002517
Yuval Mintze712d522015-10-26 11:02:27 +02002518 /* user-changeable features */
2519 hw_features = NETIF_F_GRO | NETIF_F_SG |
2520 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2521 NETIF_F_TSO | NETIF_F_TSO6;
2522
Manish Chopra14db81d2016-04-14 01:38:33 -04002523 /* Encap features*/
2524 hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
Manish Chopraa1502412016-10-14 05:19:18 -04002525 NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
2526 NETIF_F_GSO_GRE_CSUM;
Manish Chopra14db81d2016-04-14 01:38:33 -04002527 ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2528 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
2529 NETIF_F_TSO6 | NETIF_F_GSO_GRE |
Manish Chopraa1502412016-10-14 05:19:18 -04002530 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
2531 NETIF_F_GSO_UDP_TUNNEL_CSUM |
2532 NETIF_F_GSO_GRE_CSUM;
Manish Chopra14db81d2016-04-14 01:38:33 -04002533
Yuval Mintze712d522015-10-26 11:02:27 +02002534 ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
2535 NETIF_F_HIGHDMA;
2536 ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
2537 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02002538 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
Yuval Mintze712d522015-10-26 11:02:27 +02002539
2540 ndev->hw_features = hw_features;
2541
Jarod Wilsoncaff2a82016-10-17 15:54:08 -04002542 /* MTU range: 46 - 9600 */
2543 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
2544 ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
2545
Yuval Mintze712d522015-10-26 11:02:27 +02002546 /* Set network device HW mac */
2547 ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02002548
2549 ndev->mtu = edev->dev_info.common.mtu;
Yuval Mintze712d522015-10-26 11:02:27 +02002550}
2551
2552/* This function converts from 32b param to two params of level and module
2553 * Input 32b decoding:
2554 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
2555 * 'happy' flow, e.g. memory allocation failed.
2556 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
2557 * and provide important parameters.
2558 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
2559 * module. VERBOSE prints are for tracking a specific flow at a low level.
2560 *
2561 * Notice that the level should be that of the lowest required logs.
2562 */
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02002563void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
Yuval Mintze712d522015-10-26 11:02:27 +02002564{
2565 *p_dp_level = QED_LEVEL_NOTICE;
2566 *p_dp_module = 0;
2567
2568 if (debug & QED_LOG_VERBOSE_MASK) {
2569 *p_dp_level = QED_LEVEL_VERBOSE;
2570 *p_dp_module = (debug & 0x3FFFFFFF);
2571 } else if (debug & QED_LOG_INFO_MASK) {
2572 *p_dp_level = QED_LEVEL_INFO;
2573 } else if (debug & QED_LOG_NOTICE_MASK) {
2574 *p_dp_level = QED_LEVEL_NOTICE;
2575 }
2576}
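
/* Illustrative sketch (not part of the driver): how a 'debug' module
 * parameter value decodes under qede_config_debug() above. The mask values
 * mirror the b31/b30/b29-b0 layout described in the comment; they are
 * assumed to match the QED_LOG_*_MASK definitions in qed_if.h and are
 * repeated here only to keep the example self-contained.
 *
 *   debug = 0x80000000  ->  level = NOTICE,  module = 0
 *   debug = 0x40000000  ->  level = INFO,    module = 0
 *   debug = 0x00000005  ->  level = VERBOSE, module bits 0 and 2
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LOG_VERBOSE_MASK	0x3fffffffU	/* assumed QED_LOG_VERBOSE_MASK */
#define DEMO_LOG_INFO_MASK	0x40000000U	/* assumed QED_LOG_INFO_MASK */
#define DEMO_LOG_NOTICE_MASK	0x80000000U	/* assumed QED_LOG_NOTICE_MASK */

static void demo_decode_debug(uint32_t debug)
{
	const char *level = "NOTICE";
	uint32_t module = 0;

	if (debug & DEMO_LOG_VERBOSE_MASK) {
		level = "VERBOSE";
		module = debug & 0x3fffffff;
	} else if (debug & DEMO_LOG_INFO_MASK) {
		level = "INFO";
	}

	printf("debug=0x%08x -> level=%s module=0x%08x\n", debug, level, module);
}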
2577
Yuval Mintz29502192015-10-26 11:02:29 +02002578static void qede_free_fp_array(struct qede_dev *edev)
2579{
2580 if (edev->fp_array) {
2581 struct qede_fastpath *fp;
2582 int i;
2583
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002584 for_each_queue(i) {
Yuval Mintz29502192015-10-26 11:02:29 +02002585 fp = &edev->fp_array[i];
2586
2587 kfree(fp->sb_info);
2588 kfree(fp->rxq);
Mintz, Yuval80439a12016-11-29 16:47:02 +02002589 kfree(fp->txq);
Yuval Mintz29502192015-10-26 11:02:29 +02002590 }
2591 kfree(edev->fp_array);
2592 }
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002593
2594 edev->num_queues = 0;
2595 edev->fp_num_tx = 0;
2596 edev->fp_num_rx = 0;
Yuval Mintz29502192015-10-26 11:02:29 +02002597}
2598
2599static int qede_alloc_fp_array(struct qede_dev *edev)
2600{
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002601 u8 fp_combined, fp_rx = edev->fp_num_rx;
Yuval Mintz29502192015-10-26 11:02:29 +02002602 struct qede_fastpath *fp;
2603 int i;
2604
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002605 edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
Yuval Mintz29502192015-10-26 11:02:29 +02002606 sizeof(*edev->fp_array), GFP_KERNEL);
2607 if (!edev->fp_array) {
2608 DP_NOTICE(edev, "fp array allocation failed\n");
2609 goto err;
2610 }
2611
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002612 fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
2613
2614 /* Allocate the FP elements for Rx queues followed by combined and then
2615 * the Tx. This ordering should be maintained so that the respective
2616 * queues (Rx or Tx) will be together in the fastpath array and the
2617 * associated ids will be sequential.
2618 */
2619 for_each_queue(i) {
Yuval Mintz29502192015-10-26 11:02:29 +02002620 fp = &edev->fp_array[i];
2621
Mintz, Yuval80439a12016-11-29 16:47:02 +02002622 fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
Yuval Mintz29502192015-10-26 11:02:29 +02002623 if (!fp->sb_info) {
2624 DP_NOTICE(edev, "sb info struct allocation failed\n");
2625 goto err;
2626 }
2627
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002628 if (fp_rx) {
2629 fp->type = QEDE_FASTPATH_RX;
2630 fp_rx--;
2631 } else if (fp_combined) {
2632 fp->type = QEDE_FASTPATH_COMBINED;
2633 fp_combined--;
2634 } else {
2635 fp->type = QEDE_FASTPATH_TX;
Yuval Mintz29502192015-10-26 11:02:29 +02002636 }
2637
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002638 if (fp->type & QEDE_FASTPATH_TX) {
Mintz, Yuval80439a12016-11-29 16:47:02 +02002639 fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
2640 if (!fp->txq)
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002641 goto err;
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002642 }
2643
2644 if (fp->type & QEDE_FASTPATH_RX) {
Mintz, Yuval80439a12016-11-29 16:47:02 +02002645 fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
2646 if (!fp->rxq)
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002647 goto err;
Mintz, Yuval496e0512016-11-29 16:47:09 +02002648
2649 if (edev->xdp_prog)
2650 fp->type |= QEDE_FASTPATH_XDP;
Yuval Mintz29502192015-10-26 11:02:29 +02002651 }
2652 }
2653
2654 return 0;
2655err:
2656 qede_free_fp_array(edev);
2657 return -ENOMEM;
2658}
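
/* Illustrative sketch (not part of the driver): the fastpath type layout that
 * qede_alloc_fp_array() builds above. With fp_num_rx Rx-only queues,
 * fp_num_tx Tx-only queues and the remainder combined, the array is ordered
 * Rx-only first, then combined, then Tx-only, keeping the ids of each kind
 * contiguous. E.g. total = 8, rx = 2, tx = 2 yields "RRCCCCTT". The helper
 * is an assumption for the example; 'out' must hold total + 1 bytes.
 */
static void demo_fp_layout(char *out, unsigned int total,
			   unsigned int num_rx, unsigned int num_tx)
{
	unsigned int combined = total - num_rx - num_tx;
	unsigned int i;

	for (i = 0; i < total; i++) {
		if (num_rx) {
			out[i] = 'R';
			num_rx--;
		} else if (combined) {
			out[i] = 'C';
			combined--;
		} else {
			out[i] = 'T';
		}
	}
	out[total] = '\0';
}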
2659
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002660static void qede_sp_task(struct work_struct *work)
2661{
2662 struct qede_dev *edev = container_of(work, struct qede_dev,
2663 sp_task.work);
Manish Choprab18e1702016-04-14 01:38:30 -04002664 struct qed_dev *cdev = edev->cdev;
2665
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002666 __qede_lock(edev);
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002667
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002668 if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
2669 if (edev->state == QEDE_STATE_OPEN)
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002670 qede_config_rx_mode(edev->ndev);
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002671
Manish Choprab18e1702016-04-14 01:38:30 -04002672 if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
2673 struct qed_tunn_params tunn_params;
2674
2675 memset(&tunn_params, 0, sizeof(tunn_params));
2676 tunn_params.update_vxlan_port = 1;
2677 tunn_params.vxlan_port = edev->vxlan_dst_port;
2678 qed_ops->tunn_config(cdev, &tunn_params);
2679 }
2680
Manish Chopra9a109dd2016-04-14 01:38:31 -04002681 if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
2682 struct qed_tunn_params tunn_params;
2683
2684 memset(&tunn_params, 0, sizeof(tunn_params));
2685 tunn_params.update_geneve_port = 1;
2686 tunn_params.geneve_port = edev->geneve_dst_port;
2687 qed_ops->tunn_config(cdev, &tunn_params);
2688 }
2689
Mintz, Yuval567b3c12016-11-29 16:47:05 +02002690 __qede_unlock(edev);
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002691}
2692
Yuval Mintze712d522015-10-26 11:02:27 +02002693static void qede_update_pf_params(struct qed_dev *cdev)
2694{
2695 struct qed_pf_params pf_params;
2696
Sudarsana Reddy Kalluru8e0ddc02016-05-05 00:35:16 -04002697 /* 64 rx + 64 tx */
Yuval Mintze712d522015-10-26 11:02:27 +02002698 memset(&pf_params, 0, sizeof(struct qed_pf_params));
Sudarsana Reddy Kalluru8e0ddc02016-05-05 00:35:16 -04002699 pf_params.eth_pf_params.num_cons = 128;
Yuval Mintze712d522015-10-26 11:02:27 +02002700 qed_ops->common->update_pf_params(cdev, &pf_params);
2701}
2702
2703enum qede_probe_mode {
2704 QEDE_PROBE_NORMAL,
2705};
2706
2707static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002708 bool is_vf, enum qede_probe_mode mode)
Yuval Mintze712d522015-10-26 11:02:27 +02002709{
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002710 struct qed_probe_params probe_params;
Yuval Mintz1a635e42016-08-15 10:42:43 +03002711 struct qed_slowpath_params sp_params;
Yuval Mintze712d522015-10-26 11:02:27 +02002712 struct qed_dev_eth_info dev_info;
2713 struct qede_dev *edev;
2714 struct qed_dev *cdev;
2715 int rc;
2716
2717 if (unlikely(dp_level & QED_LEVEL_INFO))
2718 pr_notice("Starting qede probe\n");
2719
Yuval Mintz1408cc1f2016-05-11 16:36:14 +03002720 memset(&probe_params, 0, sizeof(probe_params));
2721 probe_params.protocol = QED_PROTOCOL_ETH;
2722 probe_params.dp_module = dp_module;
2723 probe_params.dp_level = dp_level;
2724 probe_params.is_vf = is_vf;
2725 cdev = qed_ops->common->probe(pdev, &probe_params);
Yuval Mintze712d522015-10-26 11:02:27 +02002726 if (!cdev) {
2727 rc = -ENODEV;
2728 goto err0;
2729 }
2730
2731 qede_update_pf_params(cdev);
2732
2733 /* Start the Slowpath-process */
Yuval Mintz1a635e42016-08-15 10:42:43 +03002734 memset(&sp_params, 0, sizeof(sp_params));
2735 sp_params.int_mode = QED_INT_MODE_MSIX;
2736 sp_params.drv_major = QEDE_MAJOR_VERSION;
2737 sp_params.drv_minor = QEDE_MINOR_VERSION;
2738 sp_params.drv_rev = QEDE_REVISION_VERSION;
2739 sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
2740 strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
2741 rc = qed_ops->common->slowpath_start(cdev, &sp_params);
Yuval Mintze712d522015-10-26 11:02:27 +02002742 if (rc) {
2743 pr_notice("Cannot start slowpath\n");
2744 goto err1;
2745 }
2746
2747 /* Learn information crucial for qede to progress */
2748 rc = qed_ops->fill_dev_info(cdev, &dev_info);
2749 if (rc)
2750 goto err2;
2751
2752 edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
2753 dp_level);
2754 if (!edev) {
2755 rc = -ENOMEM;
2756 goto err2;
2757 }
2758
Yuval Mintzfefb0202016-05-11 16:36:19 +03002759 if (is_vf)
2760 edev->flags |= QEDE_FLAG_IS_VF;
2761
Yuval Mintze712d522015-10-26 11:02:27 +02002762 qede_init_ndev(edev);
2763
Ram Amranicee9fbd2016-10-01 21:59:56 +03002764 rc = qede_roce_dev_add(edev);
2765 if (rc)
2766 goto err3;
2767
Yuval Mintz29502192015-10-26 11:02:29 +02002768 rc = register_netdev(edev->ndev);
2769 if (rc) {
2770 DP_NOTICE(edev, "Cannot register net-device\n");
Ram Amranicee9fbd2016-10-01 21:59:56 +03002771 goto err4;
Yuval Mintz29502192015-10-26 11:02:29 +02002772 }
2773
Yuval Mintze712d522015-10-26 11:02:27 +02002774 edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);
2775
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02002776 edev->ops->register_ops(cdev, &qede_ll_ops, edev);
2777
Sudarsana Reddy Kalluru489e45a2016-06-08 06:22:12 -04002778#ifdef CONFIG_DCB
Sudarsana Reddy Kalluru5fe118c2016-08-29 08:29:52 -04002779 if (!IS_VF(edev))
2780 qede_set_dcbnl_ops(edev->ndev);
Sudarsana Reddy Kalluru489e45a2016-06-08 06:22:12 -04002781#endif
2782
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002783 INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
2784 mutex_init(&edev->qede_lock);
Manish Chopra3d789992016-06-30 02:35:21 -04002785 edev->rx_copybreak = QEDE_RX_HDR_SIZE;
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002786
Yuval Mintze712d522015-10-26 11:02:27 +02002787 DP_INFO(edev, "Ending successfully qede probe\n");
2788
2789 return 0;
2790
Ram Amranicee9fbd2016-10-01 21:59:56 +03002791err4:
2792 qede_roce_dev_remove(edev);
Yuval Mintz29502192015-10-26 11:02:29 +02002793err3:
2794 free_netdev(edev->ndev);
Yuval Mintze712d522015-10-26 11:02:27 +02002795err2:
2796 qed_ops->common->slowpath_stop(cdev);
2797err1:
2798 qed_ops->common->remove(cdev);
2799err0:
2800 return rc;
2801}
2802
2803static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2804{
Yuval Mintzfefb0202016-05-11 16:36:19 +03002805 bool is_vf = false;
Yuval Mintze712d522015-10-26 11:02:27 +02002806 u32 dp_module = 0;
2807 u8 dp_level = 0;
2808
Yuval Mintzfefb0202016-05-11 16:36:19 +03002809 switch ((enum qede_pci_private)id->driver_data) {
2810 case QEDE_PRIVATE_VF:
2811 if (debug & QED_LOG_VERBOSE_MASK)
2812 dev_err(&pdev->dev, "Probing a VF\n");
2813 is_vf = true;
2814 break;
2815 default:
2816 if (debug & QED_LOG_VERBOSE_MASK)
2817 dev_err(&pdev->dev, "Probing a PF\n");
2818 }
2819
Yuval Mintze712d522015-10-26 11:02:27 +02002820 qede_config_debug(debug, &dp_module, &dp_level);
2821
Yuval Mintzfefb0202016-05-11 16:36:19 +03002822 return __qede_probe(pdev, dp_module, dp_level, is_vf,
Yuval Mintze712d522015-10-26 11:02:27 +02002823 QEDE_PROBE_NORMAL);
2824}
2825
2826enum qede_remove_mode {
2827 QEDE_REMOVE_NORMAL,
2828};
2829
2830static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
2831{
2832 struct net_device *ndev = pci_get_drvdata(pdev);
2833 struct qede_dev *edev = netdev_priv(ndev);
2834 struct qed_dev *cdev = edev->cdev;
2835
2836 DP_INFO(edev, "Starting qede_remove\n");
2837
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02002838 cancel_delayed_work_sync(&edev->sp_task);
Ram Amranicee9fbd2016-10-01 21:59:56 +03002839
Yuval Mintz29502192015-10-26 11:02:29 +02002840 unregister_netdev(ndev);
2841
Ram Amranicee9fbd2016-10-01 21:59:56 +03002842 qede_roce_dev_remove(edev);
2843
Yuval Mintze712d522015-10-26 11:02:27 +02002844 edev->ops->common->set_power_state(cdev, PCI_D0);
2845
2846 pci_set_drvdata(pdev, NULL);
2847
Mintz, Yuval496e0512016-11-29 16:47:09 +02002848 /* Release edev's reference to XDP's bpf if such exist */
2849 if (edev->xdp_prog)
2850 bpf_prog_put(edev->xdp_prog);
2851
Yuval Mintze712d522015-10-26 11:02:27 +02002852 free_netdev(ndev);
2853
2854 /* Use global ops since we've freed edev */
2855 qed_ops->common->slowpath_stop(cdev);
Mintz, Yuval14d39642016-10-31 07:14:23 +02002856 if (system_state == SYSTEM_POWER_OFF)
2857 return;
Yuval Mintze712d522015-10-26 11:02:27 +02002858 qed_ops->common->remove(cdev);
2859
Yuval Mintz525ef5c2016-08-15 10:42:45 +03002860 dev_info(&pdev->dev, "Ending qede_remove successfully\n");
Yuval Mintze712d522015-10-26 11:02:27 +02002861}
2862
2863static void qede_remove(struct pci_dev *pdev)
2864{
2865 __qede_remove(pdev, QEDE_REMOVE_NORMAL);
2866}
Yuval Mintz29502192015-10-26 11:02:29 +02002867
Mintz, Yuval14d39642016-10-31 07:14:23 +02002868static void qede_shutdown(struct pci_dev *pdev)
2869{
2870 __qede_remove(pdev, QEDE_REMOVE_NORMAL);
2871}
2872
Yuval Mintz29502192015-10-26 11:02:29 +02002873/* -------------------------------------------------------------------------
2874 * START OF LOAD / UNLOAD
2875 * -------------------------------------------------------------------------
2876 */
2877
2878static int qede_set_num_queues(struct qede_dev *edev)
2879{
2880 int rc;
2881 u16 rss_num;
2882
2883 /* Setup queues according to possible resources */
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002884 if (edev->req_queues)
2885 rss_num = edev->req_queues;
Sudarsana Kalluru8edf0492015-11-30 12:25:01 +02002886 else
2887 rss_num = netif_get_num_default_rss_queues() *
2888 edev->dev_info.common.num_hwfns;
Yuval Mintz29502192015-10-26 11:02:29 +02002889
2890 rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
2891
2892 rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
2893 if (rc > 0) {
2894 /* Managed to request interrupts for our queues */
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002895 edev->num_queues = rc;
Yuval Mintz29502192015-10-26 11:02:29 +02002896 DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002897 QEDE_QUEUE_CNT(edev), rss_num);
Yuval Mintz29502192015-10-26 11:02:29 +02002898 rc = 0;
2899 }
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04002900
2901 edev->fp_num_tx = edev->req_num_tx;
2902 edev->fp_num_rx = edev->req_num_rx;
2903
Yuval Mintz29502192015-10-26 11:02:29 +02002904 return rc;
2905}
2906
2907static void qede_free_mem_sb(struct qede_dev *edev,
2908 struct qed_sb_info *sb_info)
2909{
2910 if (sb_info->sb_virt)
2911 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
2912 (void *)sb_info->sb_virt, sb_info->sb_phys);
2913}
2914
2915/* This function allocates fast-path status block memory */
2916static int qede_alloc_mem_sb(struct qede_dev *edev,
Yuval Mintz1a635e42016-08-15 10:42:43 +03002917 struct qed_sb_info *sb_info, u16 sb_id)
Yuval Mintz29502192015-10-26 11:02:29 +02002918{
2919 struct status_block *sb_virt;
2920 dma_addr_t sb_phys;
2921 int rc;
2922
2923 sb_virt = dma_alloc_coherent(&edev->pdev->dev,
Yuval Mintz1a635e42016-08-15 10:42:43 +03002924 sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
Yuval Mintz29502192015-10-26 11:02:29 +02002925 if (!sb_virt) {
2926 DP_ERR(edev, "Status block allocation failed\n");
2927 return -ENOMEM;
2928 }
2929
2930 rc = edev->ops->common->sb_init(edev->cdev, sb_info,
2931 sb_virt, sb_phys, sb_id,
2932 QED_SB_TYPE_L2_QUEUE);
2933 if (rc) {
2934 DP_ERR(edev, "Status block initialization failed\n");
2935 dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
2936 sb_virt, sb_phys);
2937 return rc;
2938 }
2939
2940 return 0;
2941}
2942
2943static void qede_free_rx_buffers(struct qede_dev *edev,
2944 struct qede_rx_queue *rxq)
2945{
2946 u16 i;
2947
2948 for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
2949 struct sw_rx_data *rx_buf;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05002950 struct page *data;
Yuval Mintz29502192015-10-26 11:02:29 +02002951
2952 rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
2953 data = rx_buf->data;
2954
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05002955 dma_unmap_page(&edev->pdev->dev,
Yuval Mintz1a635e42016-08-15 10:42:43 +03002956 rx_buf->mapping, PAGE_SIZE, DMA_FROM_DEVICE);
Yuval Mintz29502192015-10-26 11:02:29 +02002957
2958 rx_buf->data = NULL;
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05002959 __free_page(data);
Yuval Mintz29502192015-10-26 11:02:29 +02002960 }
2961}
2962
Yuval Mintz1a635e42016-08-15 10:42:43 +03002963static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
2964{
Manish Chopra55482ed2016-03-04 12:35:06 -05002965 int i;
2966
2967 if (edev->gro_disable)
2968 return;
2969
2970 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
2971 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
Mintz, Yuval01e23012016-11-29 16:47:00 +02002972 struct sw_rx_data *replace_buf = &tpa_info->buffer;
Manish Chopra55482ed2016-03-04 12:35:06 -05002973
Manish Chopraf86af2d2016-04-20 03:03:27 -04002974 if (replace_buf->data) {
Manish Chopra55482ed2016-03-04 12:35:06 -05002975 dma_unmap_page(&edev->pdev->dev,
Manish Chopra09ec8e72016-05-18 07:43:57 -04002976 replace_buf->mapping,
Manish Chopra55482ed2016-03-04 12:35:06 -05002977 PAGE_SIZE, DMA_FROM_DEVICE);
2978 __free_page(replace_buf->data);
2979 }
2980 }
2981}
2982
Yuval Mintz1a635e42016-08-15 10:42:43 +03002983static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
Yuval Mintz29502192015-10-26 11:02:29 +02002984{
Manish Chopra55482ed2016-03-04 12:35:06 -05002985 qede_free_sge_mem(edev, rxq);
2986
Yuval Mintz29502192015-10-26 11:02:29 +02002987 /* Free rx buffers */
2988 qede_free_rx_buffers(edev, rxq);
2989
2990 /* Free the parallel SW ring */
2991 kfree(rxq->sw_rx_ring);
2992
2993 /* Free the real RQ ring used by FW */
2994 edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
2995 edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
2996}
2997
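/* Allocate and map the per-aggregation replacement buffers used by the
 * TPA/GRO path. GRO is disabled instead when an XDP program is attached
 * or the MTU exceeds a single page.
 */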
Yuval Mintz1a635e42016-08-15 10:42:43 +03002998static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
Manish Chopra55482ed2016-03-04 12:35:06 -05002999{
3000 dma_addr_t mapping;
3001 int i;
3002
Mintz, Yuval496e0512016-11-29 16:47:09 +02003003 /* Don't perform FW aggregations in case of XDP */
3004 if (edev->xdp_prog)
3005 edev->gro_disable = 1;
3006
Manish Chopra55482ed2016-03-04 12:35:06 -05003007 if (edev->gro_disable)
3008 return 0;
3009
3010 if (edev->ndev->mtu > PAGE_SIZE) {
3011 edev->gro_disable = 1;
3012 return 0;
3013 }
3014
3015 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
3016 struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
Mintz, Yuval01e23012016-11-29 16:47:00 +02003017 struct sw_rx_data *replace_buf = &tpa_info->buffer;
Manish Chopra55482ed2016-03-04 12:35:06 -05003018
3019 replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
3020 if (unlikely(!replace_buf->data)) {
3021 DP_NOTICE(edev,
3022 "Failed to allocate TPA skb pool [replacement buffer]\n");
3023 goto err;
3024 }
3025
3026 mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
Mintz, Yuval95129252016-11-02 16:36:46 +02003027 PAGE_SIZE, DMA_FROM_DEVICE);
Manish Chopra55482ed2016-03-04 12:35:06 -05003028 if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
3029 DP_NOTICE(edev,
3030 "Failed to map TPA replacement buffer\n");
3031 goto err;
3032 }
3033
Manish Chopra09ec8e72016-05-18 07:43:57 -04003034 replace_buf->mapping = mapping;
Mintz, Yuval01e23012016-11-29 16:47:00 +02003035 tpa_info->buffer.page_offset = 0;
3036 tpa_info->buffer_mapping = mapping;
3037 tpa_info->state = QEDE_AGG_STATE_NONE;
Manish Chopra55482ed2016-03-04 12:35:06 -05003038 }
3039
3040 return 0;
3041err:
3042 qede_free_sge_mem(edev, rxq);
3043 edev->gro_disable = 1;
3044 return -ENOMEM;
3045}
3046
Yuval Mintz29502192015-10-26 11:02:29 +02003047/* This function allocates all memory needed per Rx queue */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003048static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
Yuval Mintz29502192015-10-26 11:02:29 +02003049{
Manish Chopraf86af2d2016-04-20 03:03:27 -04003050 int i, rc, size;
Yuval Mintz29502192015-10-26 11:02:29 +02003051
3052 rxq->num_rx_buffers = edev->q_num_rx_buffers;
3053
Yuval Mintz1a635e42016-08-15 10:42:43 +03003054 rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
3055
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05003056 if (rxq->rx_buf_size > PAGE_SIZE)
3057 rxq->rx_buf_size = PAGE_SIZE;
3058
Mintz, Yuval496e0512016-11-29 16:47:09 +02003059	/* Segment size to split a page into multiple equal parts,
 3060	 * unless XDP is used, in which case we'd use the entire page.
3061 */
3062 if (!edev->xdp_prog)
3063 rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
3064 else
3065 rxq->rx_buf_seg_size = PAGE_SIZE;
Yuval Mintz29502192015-10-26 11:02:29 +02003066
3067 /* Allocate the parallel driver ring for Rx buffers */
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05003068 size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
Yuval Mintz29502192015-10-26 11:02:29 +02003069 rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
3070 if (!rxq->sw_rx_ring) {
3071 DP_ERR(edev, "Rx buffers ring allocation failed\n");
Manish Chopraf86af2d2016-04-20 03:03:27 -04003072 rc = -ENOMEM;
Yuval Mintz29502192015-10-26 11:02:29 +02003073 goto err;
3074 }
3075
3076 /* Allocate FW Rx ring */
3077 rc = edev->ops->common->chain_alloc(edev->cdev,
3078 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
3079 QED_CHAIN_MODE_NEXT_PTR,
Yuval Mintza91eb522016-06-03 14:35:32 +03003080 QED_CHAIN_CNT_TYPE_U16,
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05003081 RX_RING_SIZE,
Yuval Mintz29502192015-10-26 11:02:29 +02003082 sizeof(struct eth_rx_bd),
3083 &rxq->rx_bd_ring);
3084
3085 if (rc)
3086 goto err;
3087
3088 /* Allocate FW completion ring */
3089 rc = edev->ops->common->chain_alloc(edev->cdev,
3090 QED_CHAIN_USE_TO_CONSUME,
3091 QED_CHAIN_MODE_PBL,
Yuval Mintza91eb522016-06-03 14:35:32 +03003092 QED_CHAIN_CNT_TYPE_U16,
Yuval Mintzfc48b7a2016-02-15 13:22:35 -05003093 RX_RING_SIZE,
Yuval Mintz29502192015-10-26 11:02:29 +02003094 sizeof(union eth_rx_cqe),
3095 &rxq->rx_comp_ring);
3096 if (rc)
3097 goto err;
3098
3099 /* Allocate buffers for the Rx ring */
3100 for (i = 0; i < rxq->num_rx_buffers; i++) {
Mintz, Yuval9eb22352016-11-29 16:47:08 +02003101 rc = qede_alloc_rx_buffer(rxq);
Manish Chopraf86af2d2016-04-20 03:03:27 -04003102 if (rc) {
3103 DP_ERR(edev,
3104 "Rx buffers allocation failed at index %d\n", i);
3105 goto err;
3106 }
Yuval Mintz29502192015-10-26 11:02:29 +02003107 }
3108
Manish Chopraf86af2d2016-04-20 03:03:27 -04003109 rc = qede_alloc_sge_mem(edev, rxq);
Yuval Mintz29502192015-10-26 11:02:29 +02003110err:
Manish Chopraf86af2d2016-04-20 03:03:27 -04003111 return rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003112}
3113
Yuval Mintz1a635e42016-08-15 10:42:43 +03003114static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
Yuval Mintz29502192015-10-26 11:02:29 +02003115{
3116 /* Free the parallel SW ring */
3117 kfree(txq->sw_tx_ring);
3118
3119	/* Free the real Tx ring (PBL) used by FW */
3120 edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
3121}
3122
3123/* This function allocates all memory needed per Tx queue */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003124static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
Yuval Mintz29502192015-10-26 11:02:29 +02003125{
3126 int size, rc;
3127 union eth_tx_bd_types *p_virt;
3128
3129 txq->num_tx_buffers = edev->q_num_tx_buffers;
3130
3131 /* Allocate the parallel driver ring for Tx buffers */
Mintz, Yuval087892d2016-10-29 17:04:35 +03003132 size = sizeof(*txq->sw_tx_ring) * TX_RING_SIZE;
Yuval Mintz29502192015-10-26 11:02:29 +02003133 txq->sw_tx_ring = kzalloc(size, GFP_KERNEL);
3134 if (!txq->sw_tx_ring) {
3135 DP_NOTICE(edev, "Tx buffers ring allocation failed\n");
3136 goto err;
3137 }
3138
3139 rc = edev->ops->common->chain_alloc(edev->cdev,
3140 QED_CHAIN_USE_TO_CONSUME_PRODUCE,
3141 QED_CHAIN_MODE_PBL,
Yuval Mintza91eb522016-06-03 14:35:32 +03003142 QED_CHAIN_CNT_TYPE_U16,
Mintz, Yuval087892d2016-10-29 17:04:35 +03003143 TX_RING_SIZE,
Yuval Mintza91eb522016-06-03 14:35:32 +03003144 sizeof(*p_virt), &txq->tx_pbl);
Yuval Mintz29502192015-10-26 11:02:29 +02003145 if (rc)
3146 goto err;
3147
3148 return 0;
3149
3150err:
3151 qede_free_mem_txq(edev, txq);
3152 return -ENOMEM;
3153}
3154
3155/* This function frees all memory of a single fp */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003156static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
Yuval Mintz29502192015-10-26 11:02:29 +02003157{
Yuval Mintz29502192015-10-26 11:02:29 +02003158 qede_free_mem_sb(edev, fp->sb_info);
3159
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003160 if (fp->type & QEDE_FASTPATH_RX)
3161 qede_free_mem_rxq(edev, fp->rxq);
Yuval Mintz29502192015-10-26 11:02:29 +02003162
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003163 if (fp->type & QEDE_FASTPATH_TX)
Mintz, Yuval80439a12016-11-29 16:47:02 +02003164 qede_free_mem_txq(edev, fp->txq);
Yuval Mintz29502192015-10-26 11:02:29 +02003165}
3166
3167/* This function allocates all memory needed for a single fp (i.e. an entity
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003168 * which contains a status block, one rx queue and/or multiple per-TC tx queues).
Yuval Mintz29502192015-10-26 11:02:29 +02003169 */
Yuval Mintz1a635e42016-08-15 10:42:43 +03003170static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
Yuval Mintz29502192015-10-26 11:02:29 +02003171{
Mintz, Yuval80439a12016-11-29 16:47:02 +02003172 int rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003173
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003174 rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
Yuval Mintz29502192015-10-26 11:02:29 +02003175 if (rc)
3176 goto err;
3177
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003178 if (fp->type & QEDE_FASTPATH_RX) {
3179 rc = qede_alloc_mem_rxq(edev, fp->rxq);
Yuval Mintz29502192015-10-26 11:02:29 +02003180 if (rc)
3181 goto err;
3182 }
3183
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003184 if (fp->type & QEDE_FASTPATH_TX) {
Mintz, Yuval80439a12016-11-29 16:47:02 +02003185 rc = qede_alloc_mem_txq(edev, fp->txq);
3186 if (rc)
3187 goto err;
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003188 }
3189
Yuval Mintz29502192015-10-26 11:02:29 +02003190 return 0;
Yuval Mintz29502192015-10-26 11:02:29 +02003191err:
Manish Chopraf86af2d2016-04-20 03:03:27 -04003192 return rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003193}
3194
3195static void qede_free_mem_load(struct qede_dev *edev)
3196{
3197 int i;
3198
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003199 for_each_queue(i) {
Yuval Mintz29502192015-10-26 11:02:29 +02003200 struct qede_fastpath *fp = &edev->fp_array[i];
3201
3202 qede_free_mem_fp(edev, fp);
3203 }
3204}
3205
3206/* This function allocates all qede memory at NIC load. */
3207static int qede_alloc_mem_load(struct qede_dev *edev)
3208{
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003209 int rc = 0, queue_id;
Yuval Mintz29502192015-10-26 11:02:29 +02003210
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003211 for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
3212 struct qede_fastpath *fp = &edev->fp_array[queue_id];
Yuval Mintz29502192015-10-26 11:02:29 +02003213
3214 rc = qede_alloc_mem_fp(edev, fp);
Manish Chopraf86af2d2016-04-20 03:03:27 -04003215 if (rc) {
Yuval Mintz29502192015-10-26 11:02:29 +02003216 DP_ERR(edev,
Manish Chopraf86af2d2016-04-20 03:03:27 -04003217 "Failed to allocate memory for fastpath - rss id = %d\n",
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003218 queue_id);
Manish Chopraf86af2d2016-04-20 03:03:27 -04003219 qede_free_mem_load(edev);
3220 return rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003221 }
Yuval Mintz29502192015-10-26 11:02:29 +02003222 }
3223
3224 return 0;
3225}
3226
3227/* This function inits fp content and resets the SB, RXQ and TXQ structures */
3228static void qede_init_fp(struct qede_dev *edev)
3229{
Mintz, Yuval80439a12016-11-29 16:47:02 +02003230 int queue_id, rxq_index = 0, txq_index = 0;
Yuval Mintz29502192015-10-26 11:02:29 +02003231 struct qede_fastpath *fp;
3232
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003233 for_each_queue(queue_id) {
3234 fp = &edev->fp_array[queue_id];
Yuval Mintz29502192015-10-26 11:02:29 +02003235
3236 fp->edev = edev;
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003237 fp->id = queue_id;
Yuval Mintz29502192015-10-26 11:02:29 +02003238
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003240 if (fp->type & QEDE_FASTPATH_RX) {
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003241 fp->rxq->rxq_id = rxq_index++;
Mintz, Yuval9eb22352016-11-29 16:47:08 +02003242 fp->rxq->dev = &edev->pdev->dev;
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003243 }
Yuval Mintz29502192015-10-26 11:02:29 +02003244
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003245 if (fp->type & QEDE_FASTPATH_TX) {
Mintz, Yuval80439a12016-11-29 16:47:02 +02003246 fp->txq->index = txq_index++;
3247 if (edev->dev_info.is_legacy)
3248 fp->txq->is_legacy = 1;
Mintz, Yuval9eb22352016-11-29 16:47:08 +02003249 fp->txq->dev = &edev->pdev->dev;
Yuval Mintz29502192015-10-26 11:02:29 +02003250 }
3251
3252 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003253 edev->ndev->name, queue_id);
Yuval Mintz29502192015-10-26 11:02:29 +02003254 }
Manish Chopra55482ed2016-03-04 12:35:06 -05003255
3256 edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
Yuval Mintz29502192015-10-26 11:02:29 +02003257}
3258
3259static int qede_set_real_num_queues(struct qede_dev *edev)
3260{
3261 int rc = 0;
3262
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003263 rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
Yuval Mintz29502192015-10-26 11:02:29 +02003264 if (rc) {
3265 DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
3266 return rc;
3267 }
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003268
3269 rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
Yuval Mintz29502192015-10-26 11:02:29 +02003270 if (rc) {
3271 DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
3272 return rc;
3273 }
3274
3275 return 0;
3276}
3277
3278static void qede_napi_disable_remove(struct qede_dev *edev)
3279{
3280 int i;
3281
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003282 for_each_queue(i) {
Yuval Mintz29502192015-10-26 11:02:29 +02003283 napi_disable(&edev->fp_array[i].napi);
3284
3285 netif_napi_del(&edev->fp_array[i].napi);
3286 }
3287}
3288
3289static void qede_napi_add_enable(struct qede_dev *edev)
3290{
3291 int i;
3292
3293 /* Add NAPI objects */
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003294 for_each_queue(i) {
Yuval Mintz29502192015-10-26 11:02:29 +02003295 netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
3296 qede_poll, NAPI_POLL_WEIGHT);
3297 napi_enable(&edev->fp_array[i].napi);
3298 }
3299}
3300
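/* Quiesce and release all previously requested fastpath interrupts, either
 * MSI-X vectors or SIMD handlers, and reset the used-vector count.
 */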
3301static void qede_sync_free_irqs(struct qede_dev *edev)
3302{
3303 int i;
3304
3305 for (i = 0; i < edev->int_info.used_cnt; i++) {
3306 if (edev->int_info.msix_cnt) {
3307 synchronize_irq(edev->int_info.msix[i].vector);
3308 free_irq(edev->int_info.msix[i].vector,
3309 &edev->fp_array[i]);
3310 } else {
3311 edev->ops->common->simd_handler_clean(edev->cdev, i);
3312 }
3313 }
3314
3315 edev->int_info.used_cnt = 0;
3316}
3317
3318static int qede_req_msix_irqs(struct qede_dev *edev)
3319{
3320 int i, rc;
3321
3322	/* Make sure the prepared RSS queues don't exceed the available MSI-X vectors */
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003323 if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
Yuval Mintz29502192015-10-26 11:02:29 +02003324 DP_ERR(edev,
3325 "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003326 QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
Yuval Mintz29502192015-10-26 11:02:29 +02003327 return -EINVAL;
3328 }
3329
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003330 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
Yuval Mintz29502192015-10-26 11:02:29 +02003331 rc = request_irq(edev->int_info.msix[i].vector,
3332 qede_msix_fp_int, 0, edev->fp_array[i].name,
3333 &edev->fp_array[i]);
3334 if (rc) {
3335 DP_ERR(edev, "Request fp %d irq failed\n", i);
3336 qede_sync_free_irqs(edev);
3337 return rc;
3338 }
3339 DP_VERBOSE(edev, NETIF_MSG_INTR,
3340 "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
3341 edev->fp_array[i].name, i,
3342 &edev->fp_array[i]);
3343 edev->int_info.used_cnt++;
3344 }
3345
3346 return 0;
3347}
3348
3349static void qede_simd_fp_handler(void *cookie)
3350{
3351 struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
3352
3353 napi_schedule_irqoff(&fp->napi);
3354}
3355
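/* Learn the interrupt configuration from qed and request either MSI-X
 * vectors or SIMD handlers for every fastpath queue.
 */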
3356static int qede_setup_irqs(struct qede_dev *edev)
3357{
3358 int i, rc = 0;
3359
3360 /* Learn Interrupt configuration */
3361 rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
3362 if (rc)
3363 return rc;
3364
3365 if (edev->int_info.msix_cnt) {
3366 rc = qede_req_msix_irqs(edev);
3367 if (rc)
3368 return rc;
3369 edev->ndev->irq = edev->int_info.msix[0].vector;
3370 } else {
3371 const struct qed_common_ops *ops;
3372
3373		/* qed should learn the RSS ids and callbacks */
3374 ops = edev->ops->common;
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003375 for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
Yuval Mintz29502192015-10-26 11:02:29 +02003376 ops->simd_handler_config(edev->cdev,
3377 &edev->fp_array[i], i,
3378 qede_simd_fp_handler);
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003379 edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
Yuval Mintz29502192015-10-26 11:02:29 +02003380 }
3381 return 0;
3382}
3383
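/* Poll until the Tx queue's SW consumer catches up with its producer.
 * If the queue is still stuck after ~1000 polls of 1-2ms each, optionally
 * ask the MCP to drain it once before giving up with -ENODEV.
 */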
3384static int qede_drain_txq(struct qede_dev *edev,
Yuval Mintz1a635e42016-08-15 10:42:43 +03003385 struct qede_tx_queue *txq, bool allow_drain)
Yuval Mintz29502192015-10-26 11:02:29 +02003386{
3387 int rc, cnt = 1000;
3388
3389 while (txq->sw_tx_cons != txq->sw_tx_prod) {
3390 if (!cnt) {
3391 if (allow_drain) {
3392 DP_NOTICE(edev,
3393 "Tx queue[%d] is stuck, requesting MCP to drain\n",
3394 txq->index);
3395 rc = edev->ops->common->drain(edev->cdev);
3396 if (rc)
3397 return rc;
3398 return qede_drain_txq(edev, txq, false);
3399 }
3400 DP_NOTICE(edev,
3401 "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
3402 txq->index, txq->sw_tx_prod,
3403 txq->sw_tx_cons);
3404 return -ENODEV;
3405 }
3406 cnt--;
3407 usleep_range(1000, 2000);
3408 barrier();
3409 }
3410
3411 /* FW finished processing, wait for HW to transmit all tx packets */
3412 usleep_range(1000, 2000);
3413
3414 return 0;
3415}
3416
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003417static int qede_stop_txq(struct qede_dev *edev,
3418 struct qede_tx_queue *txq, int rss_id)
3419{
3420 return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
3421}
3422
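/* Tear down the datapath: disable the vport, drain all Tx queues, stop the
 * Tx/Rx queues in reverse order, release per-queue XDP references and
 * finally stop the vport.
 */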
Yuval Mintz29502192015-10-26 11:02:29 +02003423static int qede_stop_queues(struct qede_dev *edev)
3424{
3425 struct qed_update_vport_params vport_update_params;
3426 struct qed_dev *cdev = edev->cdev;
Mintz, Yuval80439a12016-11-29 16:47:02 +02003427 struct qede_fastpath *fp;
3428 int rc, i;
Yuval Mintz29502192015-10-26 11:02:29 +02003429
3430 /* Disable the vport */
3431 memset(&vport_update_params, 0, sizeof(vport_update_params));
3432 vport_update_params.vport_id = 0;
3433 vport_update_params.update_vport_active_flg = 1;
3434 vport_update_params.vport_active_flg = 0;
3435 vport_update_params.update_rss_flg = 0;
3436
3437 rc = edev->ops->vport_update(cdev, &vport_update_params);
3438 if (rc) {
3439 DP_ERR(edev, "Failed to update vport\n");
3440 return rc;
3441 }
3442
3443 /* Flush Tx queues. If needed, request drain from MCP */
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003444 for_each_queue(i) {
Mintz, Yuval80439a12016-11-29 16:47:02 +02003445 fp = &edev->fp_array[i];
Yuval Mintz29502192015-10-26 11:02:29 +02003446
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003447 if (fp->type & QEDE_FASTPATH_TX) {
Mintz, Yuval80439a12016-11-29 16:47:02 +02003448 rc = qede_drain_txq(edev, fp->txq, true);
3449 if (rc)
3450 return rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003451 }
3452 }
3453
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003454 /* Stop all Queues in reverse order */
3455 for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
Mintz, Yuval80439a12016-11-29 16:47:02 +02003456 fp = &edev->fp_array[i];
Yuval Mintz29502192015-10-26 11:02:29 +02003457
Mintz, Yuval80439a12016-11-29 16:47:02 +02003458 /* Stop the Tx Queue(s) */
3459 if (fp->type & QEDE_FASTPATH_TX) {
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003460 rc = qede_stop_txq(edev, fp->txq, i);
3461 if (rc)
3462 return rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003463 }
3464
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003465 /* Stop the Rx Queue */
Mintz, Yuval80439a12016-11-29 16:47:02 +02003466 if (fp->type & QEDE_FASTPATH_RX) {
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003467 rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003468 if (rc) {
3469 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
3470 return rc;
3471 }
Yuval Mintz29502192015-10-26 11:02:29 +02003472 }
Mintz, Yuval496e0512016-11-29 16:47:09 +02003473
3474 if (fp->type & QEDE_FASTPATH_XDP)
3475 bpf_prog_put(fp->rxq->xdp_prog);
Yuval Mintz29502192015-10-26 11:02:29 +02003476 }
3477
3478 /* Stop the vport */
3479 rc = edev->ops->vport_stop(cdev, 0);
3480 if (rc)
3481 DP_ERR(edev, "Failed to stop VPORT\n");
3482
3483 return rc;
3484}
3485
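/* Start a single Tx queue via qed, keep the returned doorbell address and
 * queue handle, and pre-build the constant part of the doorbell data used
 * on the transmit path.
 */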
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003486static int qede_start_txq(struct qede_dev *edev,
3487 struct qede_fastpath *fp,
3488 struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
3489{
3490 dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
3491 u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
3492 struct qed_queue_start_common_params params;
3493 struct qed_txq_start_ret_params ret_params;
3494 int rc;
3495
3496 memset(&params, 0, sizeof(params));
3497 memset(&ret_params, 0, sizeof(ret_params));
3498
3499 params.queue_id = txq->index;
3500 params.sb = fp->sb_info->igu_sb_id;
3501 params.sb_idx = sb_idx;
3502
3503 rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
3504 page_cnt, &ret_params);
3505 if (rc) {
3506 DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
3507 return rc;
3508 }
3509
3510 txq->doorbell_addr = ret_params.p_doorbell;
3511 txq->handle = ret_params.p_handle;
3512
3513 /* Determine the FW consumer address associated */
3514 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
3515
3516 /* Prepare the doorbell parameters */
3517 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
3518 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
3519 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
3520 DQ_XCM_ETH_TX_BD_PROD_CMD);
3521 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
3522
3523 return rc;
3524}
3525
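/* Bring up the datapath: start the vport, start every Rx, XDP and Tx queue,
 * then activate the vport and program the RSS configuration, re-creating
 * the indirection table and hash key if needed.
 */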
Yuval Mintza0d26d52016-06-19 15:18:13 +03003526static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
Yuval Mintz29502192015-10-26 11:02:29 +02003527{
Manish Chopra088c8612016-03-04 12:35:05 -05003528 int vlan_removal_en = 1;
Yuval Mintz29502192015-10-26 11:02:29 +02003529 struct qed_dev *cdev = edev->cdev;
Yuval Mintz29502192015-10-26 11:02:29 +02003530 struct qed_update_vport_params vport_update_params;
3531 struct qed_queue_start_common_params q_params;
Yuval Mintzfefb0202016-05-11 16:36:19 +03003532 struct qed_dev_info *qed_info = &edev->dev_info.common;
Manish Chopra088c8612016-03-04 12:35:05 -05003533 struct qed_start_vport_params start = {0};
Sudarsana Reddy Kalluru961acde2016-04-10 12:43:01 +03003534 bool reset_rss_indir = false;
Mintz, Yuval80439a12016-11-29 16:47:02 +02003535 int rc, i;
Yuval Mintz29502192015-10-26 11:02:29 +02003536
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003537 if (!edev->num_queues) {
Yuval Mintz29502192015-10-26 11:02:29 +02003538 DP_ERR(edev,
3539 "Cannot update V-VPORT as active as there are no Rx queues\n");
3540 return -EINVAL;
3541 }
3542
Manish Chopra55482ed2016-03-04 12:35:06 -05003543 start.gro_enable = !edev->gro_disable;
Manish Chopra088c8612016-03-04 12:35:05 -05003544 start.mtu = edev->ndev->mtu;
3545 start.vport_id = 0;
3546 start.drop_ttl0 = true;
3547 start.remove_inner_vlan = vlan_removal_en;
Yuval Mintz7f7a1442016-07-27 14:45:22 +03003548 start.clear_stats = clear_stats;
Manish Chopra088c8612016-03-04 12:35:05 -05003549
3550 rc = edev->ops->vport_start(cdev, &start);
Yuval Mintz29502192015-10-26 11:02:29 +02003551
3552 if (rc) {
3553 DP_ERR(edev, "Start V-PORT failed %d\n", rc);
3554 return rc;
3555 }
3556
3557 DP_VERBOSE(edev, NETIF_MSG_IFUP,
3558 "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
Manish Chopra088c8612016-03-04 12:35:05 -05003559 start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
Yuval Mintz29502192015-10-26 11:02:29 +02003560
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003561 for_each_queue(i) {
Yuval Mintz29502192015-10-26 11:02:29 +02003562 struct qede_fastpath *fp = &edev->fp_array[i];
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003563 dma_addr_t p_phys_table;
3564 u32 page_cnt;
Yuval Mintz29502192015-10-26 11:02:29 +02003565
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003566 if (fp->type & QEDE_FASTPATH_RX) {
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003567 struct qed_rxq_start_ret_params ret_params;
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003568 struct qede_rx_queue *rxq = fp->rxq;
3569 __le16 *val;
Yuval Mintz29502192015-10-26 11:02:29 +02003570
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003571 memset(&ret_params, 0, sizeof(ret_params));
Yuval Mintz29502192015-10-26 11:02:29 +02003572 memset(&q_params, 0, sizeof(q_params));
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003573 q_params.queue_id = rxq->rxq_id;
3574 q_params.vport_id = 0;
3575 q_params.sb = fp->sb_info->igu_sb_id;
3576 q_params.sb_idx = RX_PI;
3577
3578 p_phys_table =
3579 qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
3580 page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
3581
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003582 rc = edev->ops->q_rx_start(cdev, i, &q_params,
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003583 rxq->rx_buf_size,
3584 rxq->rx_bd_ring.p_phys_addr,
3585 p_phys_table,
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003586 page_cnt, &ret_params);
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003587 if (rc) {
3588 DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
3589 rc);
3590 return rc;
3591 }
3592
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003593 /* Use the return parameters */
3594 rxq->hw_rxq_prod_addr = ret_params.p_prod;
3595 rxq->handle = ret_params.p_handle;
3596
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003597 val = &fp->sb_info->sb_virt->pi_array[RX_PI];
3598 rxq->hw_cons_ptr = val;
3599
3600 qede_update_rx_prod(edev, rxq);
3601 }
3602
Mintz, Yuval496e0512016-11-29 16:47:09 +02003603 if (fp->type & QEDE_FASTPATH_XDP) {
3604 fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
3605 if (IS_ERR(fp->rxq->xdp_prog)) {
3606 rc = PTR_ERR(fp->rxq->xdp_prog);
3607 fp->rxq->xdp_prog = NULL;
3608 return rc;
3609 }
3610 }
3611
Mintz, Yuval80439a12016-11-29 16:47:02 +02003612 if (fp->type & QEDE_FASTPATH_TX) {
Mintz, Yuval3da7a372016-11-29 16:47:06 +02003613 rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
3614 if (rc)
Yuval Mintz29502192015-10-26 11:02:29 +02003615 return rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003616 }
3617 }
3618
3619 /* Prepare and send the vport enable */
3620 memset(&vport_update_params, 0, sizeof(vport_update_params));
Manish Chopra088c8612016-03-04 12:35:05 -05003621 vport_update_params.vport_id = start.vport_id;
Yuval Mintz29502192015-10-26 11:02:29 +02003622 vport_update_params.update_vport_active_flg = 1;
3623 vport_update_params.vport_active_flg = 1;
3624
Yuval Mintz831bfb0e2016-05-11 16:36:25 +03003625 if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
3626 qed_info->tx_switching) {
3627 vport_update_params.update_tx_switching_flg = 1;
3628 vport_update_params.tx_switching_flg = 1;
3629 }
3630
Yuval Mintz29502192015-10-26 11:02:29 +02003631 /* Fill struct with RSS params */
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003632 if (QEDE_RSS_COUNT(edev) > 1) {
Yuval Mintz29502192015-10-26 11:02:29 +02003633 vport_update_params.update_rss_flg = 1;
Sudarsana Reddy Kalluru961acde2016-04-10 12:43:01 +03003634
3635 /* Need to validate current RSS config uses valid entries */
3636 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
3637 if (edev->rss_params.rss_ind_table[i] >=
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003638 QEDE_RSS_COUNT(edev)) {
Sudarsana Reddy Kalluru961acde2016-04-10 12:43:01 +03003639 reset_rss_indir = true;
3640 break;
3641 }
3642 }
3643
3644 if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
3645 reset_rss_indir) {
3646 u16 val;
3647
3648 for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
3649 u16 indir_val;
3650
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003651 val = QEDE_RSS_COUNT(edev);
Sudarsana Reddy Kalluru961acde2016-04-10 12:43:01 +03003652 indir_val = ethtool_rxfh_indir_default(i, val);
3653 edev->rss_params.rss_ind_table[i] = indir_val;
3654 }
3655 edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
3656 }
3657
3658 if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
3659 netdev_rss_key_fill(edev->rss_params.rss_key,
3660 sizeof(edev->rss_params.rss_key));
3661 edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
3662 }
3663
3664 if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
3665 edev->rss_params.rss_caps = QED_RSS_IPV4 |
3666 QED_RSS_IPV6 |
3667 QED_RSS_IPV4_TCP |
3668 QED_RSS_IPV6_TCP;
3669 edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
3670 }
3671
3672 memcpy(&vport_update_params.rss_params, &edev->rss_params,
3673 sizeof(vport_update_params.rss_params));
Yuval Mintz29502192015-10-26 11:02:29 +02003674 } else {
Sudarsana Reddy Kalluru961acde2016-04-10 12:43:01 +03003675 memset(&vport_update_params.rss_params, 0,
3676 sizeof(vport_update_params.rss_params));
Yuval Mintz29502192015-10-26 11:02:29 +02003677 }
Yuval Mintz29502192015-10-26 11:02:29 +02003678
3679 rc = edev->ops->vport_update(cdev, &vport_update_params);
3680 if (rc) {
3681 DP_ERR(edev, "Update V-PORT failed %d\n", rc);
3682 return rc;
3683 }
3684
3685 return 0;
3686}
3687
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02003688static int qede_set_mcast_rx_mac(struct qede_dev *edev,
3689 enum qed_filter_xcast_params_type opcode,
3690 unsigned char *mac, int num_macs)
3691{
3692 struct qed_filter_params filter_cmd;
3693 int i;
3694
3695 memset(&filter_cmd, 0, sizeof(filter_cmd));
3696 filter_cmd.type = QED_FILTER_TYPE_MCAST;
3697 filter_cmd.filter.mcast.type = opcode;
3698 filter_cmd.filter.mcast.num = num_macs;
3699
3700 for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
3701 ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
3702
3703 return edev->ops->filter_config(edev->cdev, &filter_cmd);
3704}
3705
Yuval Mintz29502192015-10-26 11:02:29 +02003706enum qede_unload_mode {
3707 QEDE_UNLOAD_NORMAL,
3708};
3709
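/* Stop the interface: close the Tx path, bring the link down, stop all
 * queues, release the interrupts and free fastpath memory. Takes the qede
 * lock unless the caller already holds it.
 */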
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003710static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
3711 bool is_locked)
Yuval Mintz29502192015-10-26 11:02:29 +02003712{
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003713 struct qed_link_params link_params;
Yuval Mintz29502192015-10-26 11:02:29 +02003714 int rc;
3715
3716 DP_INFO(edev, "Starting qede unload\n");
3717
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003718 if (!is_locked)
3719 __qede_lock(edev);
3720
Ram Amranicee9fbd2016-10-01 21:59:56 +03003721 qede_roce_dev_event_close(edev);
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02003722 edev->state = QEDE_STATE_CLOSED;
3723
Yuval Mintz29502192015-10-26 11:02:29 +02003724 /* Close OS Tx */
3725 netif_tx_disable(edev->ndev);
3726 netif_carrier_off(edev->ndev);
3727
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003728 /* Reset the link */
3729 memset(&link_params, 0, sizeof(link_params));
3730 link_params.link_up = false;
3731 edev->ops->common->set_link(edev->cdev, &link_params);
Yuval Mintz29502192015-10-26 11:02:29 +02003732 rc = qede_stop_queues(edev);
3733 if (rc) {
3734 qede_sync_free_irqs(edev);
3735 goto out;
3736 }
3737
3738 DP_INFO(edev, "Stopped Queues\n");
3739
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02003740 qede_vlan_mark_nonconfigured(edev);
Yuval Mintz29502192015-10-26 11:02:29 +02003741 edev->ops->fastpath_stop(edev->cdev);
3742
3743 /* Release the interrupts */
3744 qede_sync_free_irqs(edev);
3745 edev->ops->common->set_fp_int(edev->cdev, 0);
3746
3747 qede_napi_disable_remove(edev);
3748
3749 qede_free_mem_load(edev);
3750 qede_free_fp_array(edev);
3751
3752out:
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003753 if (!is_locked)
3754 __qede_unlock(edev);
Yuval Mintz29502192015-10-26 11:02:29 +02003755 DP_INFO(edev, "Ending qede unload\n");
3756}
3757
3758enum qede_load_mode {
3759 QEDE_LOAD_NORMAL,
Yuval Mintza0d26d52016-06-19 15:18:13 +03003760 QEDE_LOAD_RELOAD,
Yuval Mintz29502192015-10-26 11:02:29 +02003761};
3762
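/* Bring the interface up: allocate the fastpath array and memory, set up
 * NAPI and interrupts, start the queues and request link-up. Takes the
 * qede lock unless the caller already holds it.
 */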
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003763static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
3764 bool is_locked)
Yuval Mintz29502192015-10-26 11:02:29 +02003765{
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003766 struct qed_link_params link_params;
3767 struct qed_link_output link_output;
Yuval Mintz29502192015-10-26 11:02:29 +02003768 int rc;
3769
3770 DP_INFO(edev, "Starting qede load\n");
3771
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003772 if (!is_locked)
3773 __qede_lock(edev);
3774
Yuval Mintz29502192015-10-26 11:02:29 +02003775 rc = qede_set_num_queues(edev);
3776 if (rc)
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003777 goto out;
Yuval Mintz29502192015-10-26 11:02:29 +02003778
3779 rc = qede_alloc_fp_array(edev);
3780 if (rc)
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003781 goto out;
Yuval Mintz29502192015-10-26 11:02:29 +02003782
3783 qede_init_fp(edev);
3784
3785 rc = qede_alloc_mem_load(edev);
3786 if (rc)
3787 goto err1;
Mintz, Yuval80439a12016-11-29 16:47:02 +02003788 DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
3789 QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
Yuval Mintz29502192015-10-26 11:02:29 +02003790
3791 rc = qede_set_real_num_queues(edev);
3792 if (rc)
3793 goto err2;
3794
3795 qede_napi_add_enable(edev);
3796 DP_INFO(edev, "Napi added and enabled\n");
3797
3798 rc = qede_setup_irqs(edev);
3799 if (rc)
3800 goto err3;
3801 DP_INFO(edev, "Setup IRQs succeeded\n");
3802
Yuval Mintza0d26d52016-06-19 15:18:13 +03003803 rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
Yuval Mintz29502192015-10-26 11:02:29 +02003804 if (rc)
3805 goto err4;
3806 DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
3807
3808 /* Add primary mac and set Rx filters */
3809 ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);
3810
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02003811 /* Program un-configured VLANs */
3812 qede_configure_vlan_filters(edev);
3813
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003814 /* Ask for link-up using current configuration */
3815 memset(&link_params, 0, sizeof(link_params));
3816 link_params.link_up = true;
3817 edev->ops->common->set_link(edev->cdev, &link_params);
3818
3819 /* Query whether link is already-up */
3820 memset(&link_output, 0, sizeof(link_output));
3821 edev->ops->common->get_link(edev->cdev, &link_output);
Ram Amranicee9fbd2016-10-01 21:59:56 +03003822 qede_roce_dev_event_open(edev);
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003823 qede_link_update(edev, &link_output);
3824
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003825 edev->state = QEDE_STATE_OPEN;
3826
Yuval Mintz29502192015-10-26 11:02:29 +02003827	DP_INFO(edev, "Ending qede load successfully\n");
3828
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003830 goto out;
Yuval Mintz29502192015-10-26 11:02:29 +02003831err4:
3832 qede_sync_free_irqs(edev);
3833 memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
3834err3:
3835 qede_napi_disable_remove(edev);
3836err2:
3837 qede_free_mem_load(edev);
3838err1:
3839 edev->ops->common->set_fp_int(edev->cdev, 0);
3840 qede_free_fp_array(edev);
Sudarsana Reddy Kalluru9a4d7e82016-08-23 10:56:55 -04003841 edev->num_queues = 0;
3842 edev->fp_num_tx = 0;
3843 edev->fp_num_rx = 0;
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003844out:
3845 if (!is_locked)
3846 __qede_unlock(edev);
3847
Yuval Mintz29502192015-10-26 11:02:29 +02003848 return rc;
3849}
3850
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003851/* 'func' must be able to run either between the unload and the subsequent
 3852 * load when the interface is actually running, or on its own when it's
 3853 * currently DOWN.
3853 */
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02003854void qede_reload(struct qede_dev *edev,
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003855 struct qede_reload_args *args, bool is_locked)
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02003856{
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003857 if (!is_locked)
3858 __qede_lock(edev);
3859
3860	/* Since qede_lock is held, the internal state can't change even
 3861	 * if the netdev state starts transitioning. Check whether the current
 3862	 * internal configuration indicates the device is up, then reload.
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02003863 */
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003864 if (edev->state == QEDE_STATE_OPEN) {
3865 qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
3866 if (args)
3867 args->func(edev, args);
3868 qede_load(edev, QEDE_LOAD_RELOAD, true);
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02003869
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003870 /* Since no one is going to do it for us, re-configure */
3871 qede_config_rx_mode(edev->ndev);
3872 } else if (args) {
3873 args->func(edev, args);
3874 }
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02003875
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003876 if (!is_locked)
3877 __qede_unlock(edev);
Sudarsana Kalluru133fac02015-10-26 11:02:34 +02003878}
3879
Yuval Mintz29502192015-10-26 11:02:29 +02003880/* called with rtnl_lock */
3881static int qede_open(struct net_device *ndev)
3882{
3883 struct qede_dev *edev = netdev_priv(ndev);
Manish Choprab18e1702016-04-14 01:38:30 -04003884 int rc;
Yuval Mintz29502192015-10-26 11:02:29 +02003885
3886 netif_carrier_off(ndev);
3887
3888 edev->ops->common->set_power_state(edev->cdev, PCI_D0);
3889
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003890 rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
Manish Choprab18e1702016-04-14 01:38:30 -04003891 if (rc)
3892 return rc;
3893
Alexander Duyckf9f082a2016-06-16 12:22:57 -07003894 udp_tunnel_get_rx_info(ndev);
3895
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02003896 edev->ops->common->update_drv_state(edev->cdev, true);
3897
Manish Choprab18e1702016-04-14 01:38:30 -04003898 return 0;
Yuval Mintz29502192015-10-26 11:02:29 +02003899}
3900
3901static int qede_close(struct net_device *ndev)
3902{
3903 struct qede_dev *edev = netdev_priv(ndev);
3904
Mintz, Yuval567b3c12016-11-29 16:47:05 +02003905 qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
Yuval Mintz29502192015-10-26 11:02:29 +02003906
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02003907 edev->ops->common->update_drv_state(edev->cdev, false);
3908
Yuval Mintz29502192015-10-26 11:02:29 +02003909 return 0;
3910}
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02003911
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003912static void qede_link_update(void *dev, struct qed_link_output *link)
3913{
3914 struct qede_dev *edev = dev;
3915
3916 if (!netif_running(edev->ndev)) {
3917 DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
3918 return;
3919 }
3920
3921 if (link->link_up) {
Yuval Mintz8e025ae2016-02-24 16:52:47 +02003922 if (!netif_carrier_ok(edev->ndev)) {
3923 DP_NOTICE(edev, "Link is up\n");
3924 netif_tx_start_all_queues(edev->ndev);
3925 netif_carrier_on(edev->ndev);
3926 }
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003927 } else {
Yuval Mintz8e025ae2016-02-24 16:52:47 +02003928 if (netif_carrier_ok(edev->ndev)) {
3929 DP_NOTICE(edev, "Link is down\n");
3930 netif_tx_disable(edev->ndev);
3931 netif_carrier_off(edev->ndev);
3932 }
Sudarsana Kallurua2ec6172015-10-26 11:02:32 +02003933 }
3934}
3935
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02003936static int qede_set_mac_addr(struct net_device *ndev, void *p)
3937{
3938 struct qede_dev *edev = netdev_priv(ndev);
3939 struct sockaddr *addr = p;
3940 int rc;
3941
3942 ASSERT_RTNL(); /* @@@TBD To be removed */
3943
3944 DP_INFO(edev, "Set_mac_addr called\n");
3945
3946 if (!is_valid_ether_addr(addr->sa_data)) {
3947 DP_NOTICE(edev, "The MAC address is not valid\n");
3948 return -EFAULT;
3949 }
3950
Yuval Mintzeff16962016-05-11 16:36:21 +03003951 if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
3952 DP_NOTICE(edev, "qed prevents setting MAC\n");
3953 return -EINVAL;
3954 }
3955
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02003956 ether_addr_copy(ndev->dev_addr, addr->sa_data);
3957
3958 if (!netif_running(ndev)) {
3959 DP_NOTICE(edev, "The device is currently down\n");
3960 return 0;
3961 }
3962
3963 /* Remove the previous primary mac */
3964 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
3965 edev->primary_mac);
3966 if (rc)
3967 return rc;
3968
Sudarsana Kalluru0fefbfb2016-10-31 07:14:21 +02003969 edev->ops->common->update_mac(edev->cdev, addr->sa_data);
3970
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02003971 /* Add MAC filter according to the new unicast HW MAC address */
3972 ether_addr_copy(edev->primary_mac, ndev->dev_addr);
3973 return qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
3974 edev->primary_mac);
3975}
3976
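/* Rebuild the multicast MAC filter configuration; fall back to
 * multicast-promiscuous mode when ALLMULTI is set or more than 64
 * multicast addresses are configured.
 */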
3977static int
3978qede_configure_mcast_filtering(struct net_device *ndev,
3979 enum qed_filter_rx_mode_type *accept_flags)
3980{
3981 struct qede_dev *edev = netdev_priv(ndev);
3982 unsigned char *mc_macs, *temp;
3983 struct netdev_hw_addr *ha;
3984 int rc = 0, mc_count;
3985 size_t size;
3986
3987 size = 64 * ETH_ALEN;
3988
3989 mc_macs = kzalloc(size, GFP_KERNEL);
3990 if (!mc_macs) {
3991 DP_NOTICE(edev,
3992 "Failed to allocate memory for multicast MACs\n");
3993 rc = -ENOMEM;
3994 goto exit;
3995 }
3996
3997 temp = mc_macs;
3998
3999 /* Remove all previously configured MAC filters */
4000 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_DEL,
4001 mc_macs, 1);
4002 if (rc)
4003 goto exit;
4004
4005 netif_addr_lock_bh(ndev);
4006
4007 mc_count = netdev_mc_count(ndev);
4008 if (mc_count < 64) {
4009 netdev_for_each_mc_addr(ha, ndev) {
4010 ether_addr_copy(temp, ha->addr);
4011 temp += ETH_ALEN;
4012 }
4013 }
4014
4015 netif_addr_unlock_bh(ndev);
4016
4017 /* Check for all multicast @@@TBD resource allocation */
4018 if ((ndev->flags & IFF_ALLMULTI) ||
4019 (mc_count > 64)) {
4020 if (*accept_flags == QED_FILTER_RX_MODE_TYPE_REGULAR)
4021 *accept_flags = QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
4022 } else {
4023 /* Add all multicast MAC filters */
4024 rc = qede_set_mcast_rx_mac(edev, QED_FILTER_XCAST_TYPE_ADD,
4025 mc_macs, mc_count);
4026 }
4027
4028exit:
4029 kfree(mc_macs);
4030 return rc;
4031}
4032
4033static void qede_set_rx_mode(struct net_device *ndev)
4034{
4035 struct qede_dev *edev = netdev_priv(ndev);
4036
Mintz, Yuval567b3c12016-11-29 16:47:05 +02004037 set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
4038 schedule_delayed_work(&edev->sp_task, 0);
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02004039}
4040
4041/* Must be called with qede_lock held */
4042static void qede_config_rx_mode(struct net_device *ndev)
4043{
4044 enum qed_filter_rx_mode_type accept_flags = QED_FILTER_TYPE_UCAST;
4045 struct qede_dev *edev = netdev_priv(ndev);
4046 struct qed_filter_params rx_mode;
4047 unsigned char *uc_macs, *temp;
4048 struct netdev_hw_addr *ha;
4049 int rc, uc_count;
4050 size_t size;
4051
4052 netif_addr_lock_bh(ndev);
4053
4054 uc_count = netdev_uc_count(ndev);
4055 size = uc_count * ETH_ALEN;
4056
4057 uc_macs = kzalloc(size, GFP_ATOMIC);
4058 if (!uc_macs) {
4059 DP_NOTICE(edev, "Failed to allocate memory for unicast MACs\n");
4060 netif_addr_unlock_bh(ndev);
4061 return;
4062 }
4063
4064 temp = uc_macs;
4065 netdev_for_each_uc_addr(ha, ndev) {
4066 ether_addr_copy(temp, ha->addr);
4067 temp += ETH_ALEN;
4068 }
4069
4070 netif_addr_unlock_bh(ndev);
4071
4072 /* Configure the struct for the Rx mode */
4073 memset(&rx_mode, 0, sizeof(struct qed_filter_params));
4074 rx_mode.type = QED_FILTER_TYPE_RX_MODE;
4075
4076 /* Remove all previous unicast secondary macs and multicast macs
4077	 * (configure / leave the primary mac)
4078 */
4079 rc = qede_set_ucast_rx_mac(edev, QED_FILTER_XCAST_TYPE_REPLACE,
4080 edev->primary_mac);
4081 if (rc)
4082 goto out;
4083
4084 /* Check for promiscuous */
4085 if ((ndev->flags & IFF_PROMISC) ||
Yuval Mintz7b7e70f2016-10-14 05:19:20 -04004086 (uc_count > edev->dev_info.num_mac_filters - 1)) {
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02004087 accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC;
4088 } else {
4089 /* Add MAC filters according to the unicast secondary macs */
4090 int i;
4091
4092 temp = uc_macs;
4093 for (i = 0; i < uc_count; i++) {
4094 rc = qede_set_ucast_rx_mac(edev,
4095 QED_FILTER_XCAST_TYPE_ADD,
4096 temp);
4097 if (rc)
4098 goto out;
4099
4100 temp += ETH_ALEN;
4101 }
4102
4103 rc = qede_configure_mcast_filtering(ndev, &accept_flags);
4104 if (rc)
4105 goto out;
4106 }
4107
Sudarsana Reddy Kalluru7c1bfca2016-02-18 17:00:40 +02004108 /* take care of VLAN mode */
4109 if (ndev->flags & IFF_PROMISC) {
4110 qede_config_accept_any_vlan(edev, true);
4111 } else if (!edev->non_configured_vlans) {
4112 /* It's possible that accept_any_vlan mode is set due to a
4113 * previous setting of IFF_PROMISC. If vlan credits are
4114 * sufficient, disable accept_any_vlan.
4115 */
4116 qede_config_accept_any_vlan(edev, false);
4117 }
4118
Sudarsana Kalluru0d8e0aa2015-10-26 11:02:30 +02004119 rx_mode.filter.accept_flags = accept_flags;
4120 edev->ops->filter_config(edev->cdev, &rx_mode);
4121out:
4122 kfree(uc_macs);
4123}