/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/version.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>
#include <asm/param.h>
#include <linux/io.h>
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/pkt_sched.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/random.h>
#include <net/ip6_checksum.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/qed/qede_roce.h>
#include "qede.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

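/* The 32-bit "debug" module parameter is decoded into a verbosity level and
 * a per-module bitmap by qede_config_debug() below.
 */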
static uint debug;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static const struct qed_eth_ops *qed_ops;

#define CHIP_NUM_57980S_40		0x1634
#define CHIP_NUM_57980S_10		0x1666
#define CHIP_NUM_57980S_MF		0x1636
#define CHIP_NUM_57980S_100		0x1644
#define CHIP_NUM_57980S_50		0x1654
#define CHIP_NUM_57980S_25		0x1656
#define CHIP_NUM_57980S_IOV		0x1664

#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40		CHIP_NUM_57980S_40
#define PCI_DEVICE_ID_57980S_10		CHIP_NUM_57980S_10
#define PCI_DEVICE_ID_57980S_MF		CHIP_NUM_57980S_MF
#define PCI_DEVICE_ID_57980S_100	CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50		CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25		CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV	CHIP_NUM_57980S_IOV
#endif

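/* Values stored in the PCI table's driver_data; qede_probe() uses them to
 * distinguish PF devices from VF devices.
 */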
enum qede_pci_private {
	QEDE_PRIVATE_PF,
	QEDE_PRIVATE_VF
};

static const struct pci_device_id qede_pci_tbl[] = {
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
#ifdef CONFIG_QED_SRIOV
	{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, qede_pci_tbl);

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);

#define TX_TIMEOUT		(5 * HZ)

/* Utilize last protocol index for XDP */
#define XDP_PI	11

static void qede_remove(struct pci_dev *pdev);
static void qede_shutdown(struct pci_dev *pdev);
static void qede_link_update(void *dev, struct qed_link_output *link);

/* The qede lock is used to protect driver state change and driver flows that
 * are not reentrant.
 */
void __qede_lock(struct qede_dev *edev)
{
	mutex_lock(&edev->qede_lock);
}

void __qede_unlock(struct qede_dev *edev)
{
	mutex_unlock(&edev->qede_lock);
}

#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
			    __be16 vlan_proto)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (vlan > 4095) {
		DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
		return -EINVAL;
	}

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
		   vlan, vf);

	return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}

static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
	struct qede_dev *edev = netdev_priv(ndev);

	DP_VERBOSE(edev, QED_MSG_IOV,
		   "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
		   mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);

	if (!is_valid_ether_addr(mac)) {
		DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
		return -EINVAL;
	}

	return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}

static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
	struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_params;
	int rc;

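	/* The vport-update params are fairly large, so allocate them from
	 * the heap instead of the stack.
	 */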
	vport_params = vzalloc(sizeof(*vport_params));
	if (!vport_params)
		return -ENOMEM;
	DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);

	rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);

	/* Enable/Disable Tx switching for PF */
	if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
	    qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
		vport_params->vport_id = 0;
		vport_params->update_tx_switching_flg = 1;
		vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
		edev->ops->vport_update(edev->cdev, vport_params);
	}

	vfree(vport_params);
	return rc;
}
#endif

static struct pci_driver qede_pci_driver = {
	.name = "qede",
	.id_table = qede_pci_tbl,
	.probe = qede_probe,
	.remove = qede_remove,
	.shutdown = qede_shutdown,
#ifdef CONFIG_QED_SRIOV
	.sriov_configure = qede_sriov_configure,
#endif
};

static struct qed_eth_cb_ops qede_ll_ops = {
	{
		.link_update = qede_link_update,
	},
	.force_mac = qede_force_mac,
};

static int qede_netdev_event(struct notifier_block *this, unsigned long event,
			     void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct ethtool_drvinfo drvinfo;
	struct qede_dev *edev;

	if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
		goto done;

	/* Check whether this is a qede device */
	if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
		goto done;

	memset(&drvinfo, 0, sizeof(drvinfo));
	ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
	if (strcmp(drvinfo.driver, "qede"))
		goto done;
	edev = netdev_priv(ndev);

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Notify qed of the name change */
		if (!edev->ops || !edev->ops->common)
			goto done;
		edev->ops->common->set_id(edev->cdev, edev->ndev->name, "qede");
		break;
	case NETDEV_CHANGEADDR:
		edev = netdev_priv(ndev);
		qede_roce_event_changeaddr(edev);
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qede_netdev_notifier = {
	.notifier_call = qede_netdev_event,
};

static
int __init qede_init(void)
{
	int ret;

	pr_info("qede_init: %s\n", version);

	qed_ops = qed_get_eth_ops();
	if (!qed_ops) {
		pr_notice("Failed to get qed ethtool operations\n");
		return -EINVAL;
	}

	/* Must register notifier before pci ops, since we might miss
	 * interface rename after pci probe and netdev registration.
	 */
	ret = register_netdevice_notifier(&qede_netdev_notifier);
	if (ret) {
		pr_notice("Failed to register netdevice_notifier\n");
		qed_put_eth_ops();
		return -EINVAL;
	}

	ret = pci_register_driver(&qede_pci_driver);
	if (ret) {
		pr_notice("Failed to register driver\n");
		unregister_netdevice_notifier(&qede_netdev_notifier);
		qed_put_eth_ops();
		return -EINVAL;
	}

	return 0;
}

static void __exit qede_cleanup(void)
{
	if (debug & QED_LOG_INFO_MASK)
		pr_info("qede_cleanup called\n");

	unregister_netdevice_notifier(&qede_netdev_notifier);
	pci_unregister_driver(&qede_pci_driver);
	qed_put_eth_ops();
}

module_init(qede_init);
module_exit(qede_cleanup);

static int qede_open(struct net_device *ndev);
static int qede_close(struct net_device *ndev);

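/* Pull the latest vport statistics from qed and cache them in the
 * qede-private stats block, which ethtool and ndo_get_stats64 read from.
 */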
void qede_fill_by_demand_stats(struct qede_dev *edev)
{
	struct qed_eth_stats stats;

	edev->ops->get_vport_stats(edev->cdev, &stats);
	edev->stats.no_buff_discards = stats.no_buff_discards;
	edev->stats.packet_too_big_discard = stats.packet_too_big_discard;
	edev->stats.ttl0_discard = stats.ttl0_discard;
	edev->stats.rx_ucast_bytes = stats.rx_ucast_bytes;
	edev->stats.rx_mcast_bytes = stats.rx_mcast_bytes;
	edev->stats.rx_bcast_bytes = stats.rx_bcast_bytes;
	edev->stats.rx_ucast_pkts = stats.rx_ucast_pkts;
	edev->stats.rx_mcast_pkts = stats.rx_mcast_pkts;
	edev->stats.rx_bcast_pkts = stats.rx_bcast_pkts;
	edev->stats.mftag_filter_discards = stats.mftag_filter_discards;
	edev->stats.mac_filter_discards = stats.mac_filter_discards;

	edev->stats.tx_ucast_bytes = stats.tx_ucast_bytes;
	edev->stats.tx_mcast_bytes = stats.tx_mcast_bytes;
	edev->stats.tx_bcast_bytes = stats.tx_bcast_bytes;
	edev->stats.tx_ucast_pkts = stats.tx_ucast_pkts;
	edev->stats.tx_mcast_pkts = stats.tx_mcast_pkts;
	edev->stats.tx_bcast_pkts = stats.tx_bcast_pkts;
	edev->stats.tx_err_drop_pkts = stats.tx_err_drop_pkts;
	edev->stats.coalesced_pkts = stats.tpa_coalesced_pkts;
	edev->stats.coalesced_events = stats.tpa_coalesced_events;
	edev->stats.coalesced_aborts_num = stats.tpa_aborts_num;
	edev->stats.non_coalesced_pkts = stats.tpa_not_coalesced_pkts;
	edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;

	edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
	edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
	edev->stats.rx_128_to_255_byte_packets =
				stats.rx_128_to_255_byte_packets;
	edev->stats.rx_256_to_511_byte_packets =
				stats.rx_256_to_511_byte_packets;
	edev->stats.rx_512_to_1023_byte_packets =
				stats.rx_512_to_1023_byte_packets;
	edev->stats.rx_1024_to_1518_byte_packets =
				stats.rx_1024_to_1518_byte_packets;
	edev->stats.rx_1519_to_1522_byte_packets =
				stats.rx_1519_to_1522_byte_packets;
	edev->stats.rx_1519_to_2047_byte_packets =
				stats.rx_1519_to_2047_byte_packets;
	edev->stats.rx_2048_to_4095_byte_packets =
				stats.rx_2048_to_4095_byte_packets;
	edev->stats.rx_4096_to_9216_byte_packets =
				stats.rx_4096_to_9216_byte_packets;
	edev->stats.rx_9217_to_16383_byte_packets =
				stats.rx_9217_to_16383_byte_packets;
	edev->stats.rx_crc_errors = stats.rx_crc_errors;
	edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
	edev->stats.rx_pause_frames = stats.rx_pause_frames;
	edev->stats.rx_pfc_frames = stats.rx_pfc_frames;
	edev->stats.rx_align_errors = stats.rx_align_errors;
	edev->stats.rx_carrier_errors = stats.rx_carrier_errors;
	edev->stats.rx_oversize_packets = stats.rx_oversize_packets;
	edev->stats.rx_jabbers = stats.rx_jabbers;
	edev->stats.rx_undersize_packets = stats.rx_undersize_packets;
	edev->stats.rx_fragments = stats.rx_fragments;
	edev->stats.tx_64_byte_packets = stats.tx_64_byte_packets;
	edev->stats.tx_65_to_127_byte_packets = stats.tx_65_to_127_byte_packets;
	edev->stats.tx_128_to_255_byte_packets =
				stats.tx_128_to_255_byte_packets;
	edev->stats.tx_256_to_511_byte_packets =
				stats.tx_256_to_511_byte_packets;
	edev->stats.tx_512_to_1023_byte_packets =
				stats.tx_512_to_1023_byte_packets;
	edev->stats.tx_1024_to_1518_byte_packets =
				stats.tx_1024_to_1518_byte_packets;
	edev->stats.tx_1519_to_2047_byte_packets =
				stats.tx_1519_to_2047_byte_packets;
	edev->stats.tx_2048_to_4095_byte_packets =
				stats.tx_2048_to_4095_byte_packets;
	edev->stats.tx_4096_to_9216_byte_packets =
				stats.tx_4096_to_9216_byte_packets;
	edev->stats.tx_9217_to_16383_byte_packets =
				stats.tx_9217_to_16383_byte_packets;
	edev->stats.tx_pause_frames = stats.tx_pause_frames;
	edev->stats.tx_pfc_frames = stats.tx_pfc_frames;
	edev->stats.tx_lpi_entry_count = stats.tx_lpi_entry_count;
	edev->stats.tx_total_collisions = stats.tx_total_collisions;
	edev->stats.brb_truncates = stats.brb_truncates;
	edev->stats.brb_discards = stats.brb_discards;
	edev->stats.tx_mac_ctrl_frames = stats.tx_mac_ctrl_frames;
}

static
struct rtnl_link_stats64 *qede_get_stats64(struct net_device *dev,
					   struct rtnl_link_stats64 *stats)
{
	struct qede_dev *edev = netdev_priv(dev);

	qede_fill_by_demand_stats(edev);

	stats->rx_packets = edev->stats.rx_ucast_pkts +
			    edev->stats.rx_mcast_pkts +
			    edev->stats.rx_bcast_pkts;
	stats->tx_packets = edev->stats.tx_ucast_pkts +
			    edev->stats.tx_mcast_pkts +
			    edev->stats.tx_bcast_pkts;

	stats->rx_bytes = edev->stats.rx_ucast_bytes +
			  edev->stats.rx_mcast_bytes +
			  edev->stats.rx_bcast_bytes;

	stats->tx_bytes = edev->stats.tx_ucast_bytes +
			  edev->stats.tx_mcast_bytes +
			  edev->stats.tx_bcast_bytes;

	stats->tx_errors = edev->stats.tx_err_drop_pkts;
	stats->multicast = edev->stats.rx_mcast_pkts +
			   edev->stats.rx_bcast_pkts;

	stats->rx_fifo_errors = edev->stats.no_buff_discards;

	stats->collisions = edev->stats.tx_total_collisions;
	stats->rx_crc_errors = edev->stats.rx_crc_errors;
	stats->rx_frame_errors = edev->stats.rx_align_errors;

	return stats;
}

#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
			      struct ifla_vf_info *ivi)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}

static int qede_set_vf_rate(struct net_device *dev, int vfidx,
			    int min_tx_rate, int max_tx_rate)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
					max_tx_rate);
}

static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}

static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
				  int link_state)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}

static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (!edev->ops)
		return -EINVAL;

	return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
}
#endif

static const struct net_device_ops qede_netdev_ops = {
	.ndo_open = qede_open,
	.ndo_stop = qede_close,
	.ndo_start_xmit = qede_start_xmit,
	.ndo_set_rx_mode = qede_set_rx_mode,
	.ndo_set_mac_address = qede_set_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_change_mtu = qede_change_mtu,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_mac = qede_set_vf_mac,
	.ndo_set_vf_vlan = qede_set_vf_vlan,
	.ndo_set_vf_trust = qede_set_vf_trust,
#endif
	.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
	.ndo_set_features = qede_set_features,
	.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
	.ndo_set_vf_link_state = qede_set_vf_link_state,
	.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
	.ndo_get_vf_config = qede_get_vf_config,
	.ndo_set_vf_rate = qede_set_vf_rate,
#endif
	.ndo_udp_tunnel_add = qede_udp_tunnel_add,
	.ndo_udp_tunnel_del = qede_udp_tunnel_del,
	.ndo_features_check = qede_features_check,
	.ndo_xdp = qede_xdp,
};

/* -------------------------------------------------------------------------
 * START OF PROBE / REMOVE
 * -------------------------------------------------------------------------
 */

static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
					    struct pci_dev *pdev,
					    struct qed_dev_eth_info *info,
					    u32 dp_module, u8 dp_level)
{
	struct net_device *ndev;
	struct qede_dev *edev;

	ndev = alloc_etherdev_mqs(sizeof(*edev),
				  info->num_queues, info->num_queues);
	if (!ndev) {
		pr_err("etherdev allocation failed\n");
		return NULL;
	}

	edev = netdev_priv(ndev);
	edev->ndev = ndev;
	edev->cdev = cdev;
	edev->pdev = pdev;
	edev->dp_module = dp_module;
	edev->dp_level = dp_level;
	edev->ops = qed_ops;
	edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
	edev->q_num_tx_buffers = NUM_TX_BDS_DEF;

	DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
		info->num_queues, info->num_queues);

	SET_NETDEV_DEV(ndev, &pdev->dev);

	memset(&edev->stats, 0, sizeof(edev->stats));
	memcpy(&edev->dev_info, info, sizeof(*info));

	INIT_LIST_HEAD(&edev->vlan_list);

	return edev;
}

static void qede_init_ndev(struct qede_dev *edev)
{
	struct net_device *ndev = edev->ndev;
	struct pci_dev *pdev = edev->pdev;
	u32 hw_features;

	pci_set_drvdata(pdev, ndev);

	ndev->mem_start = edev->dev_info.common.pci_mem_start;
	ndev->base_addr = ndev->mem_start;
	ndev->mem_end = edev->dev_info.common.pci_mem_end;
	ndev->irq = edev->dev_info.common.pci_irq;

	ndev->watchdog_timeo = TX_TIMEOUT;

	ndev->netdev_ops = &qede_netdev_ops;

	qede_set_ethtool_ops(ndev);

	ndev->priv_flags |= IFF_UNICAST_FLT;

	/* user-changeable features */
	hw_features = NETIF_F_GRO | NETIF_F_SG |
		      NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		      NETIF_F_TSO | NETIF_F_TSO6;

	/* Encap features */
	hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
		       NETIF_F_TSO_ECN | NETIF_F_GSO_UDP_TUNNEL_CSUM |
		       NETIF_F_GSO_GRE_CSUM;
	ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
				NETIF_F_TSO6 | NETIF_F_GSO_GRE |
				NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM |
				NETIF_F_GSO_UDP_TUNNEL_CSUM |
				NETIF_F_GSO_GRE_CSUM;

	ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			      NETIF_F_HIGHDMA;
	ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
			 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;

	ndev->hw_features = hw_features;

	/* MTU range: 46 - 9600 */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
	ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;

	/* Set network device HW mac */
	ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);

	ndev->mtu = edev->dev_info.common.mtu;
}

/* This function converts from 32b param to two params of level and module
 * Input 32b decoding:
 * b31 - enable all NOTICE prints. NOTICE prints are for deviation from the
 *	 'happy' flow, e.g. memory allocation failed.
 * b30 - enable all INFO prints. INFO prints are for major steps in the flow
 *	 and provide important parameters.
 * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
 *	    module. VERBOSE prints are for tracking a specific flow at a low
 *	    level.
 *
 * Notice that the level should be that of the lowest required logs.
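 *
 * For example, debug=0x40000000 selects QED_LEVEL_INFO, while a value with
 * any of bits b29-b0 set selects QED_LEVEL_VERBOSE and uses those bits as
 * the per-module bitmap.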
 */
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
{
	*p_dp_level = QED_LEVEL_NOTICE;
	*p_dp_module = 0;

	if (debug & QED_LOG_VERBOSE_MASK) {
		*p_dp_level = QED_LEVEL_VERBOSE;
		*p_dp_module = (debug & 0x3FFFFFFF);
	} else if (debug & QED_LOG_INFO_MASK) {
		*p_dp_level = QED_LEVEL_INFO;
	} else if (debug & QED_LOG_NOTICE_MASK) {
		*p_dp_level = QED_LEVEL_NOTICE;
	}
}

static void qede_free_fp_array(struct qede_dev *edev)
{
	if (edev->fp_array) {
		struct qede_fastpath *fp;
		int i;

		for_each_queue(i) {
			fp = &edev->fp_array[i];

			kfree(fp->sb_info);
			kfree(fp->rxq);
			kfree(fp->xdp_tx);
			kfree(fp->txq);
		}
		kfree(edev->fp_array);
	}

	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
}

static int qede_alloc_fp_array(struct qede_dev *edev)
{
	u8 fp_combined, fp_rx = edev->fp_num_rx;
	struct qede_fastpath *fp;
	int i;

	edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
				 sizeof(*edev->fp_array), GFP_KERNEL);
	if (!edev->fp_array) {
		DP_NOTICE(edev, "fp array allocation failed\n");
		goto err;
	}

	fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;

	/* Allocate the FP elements for Rx queues followed by combined and then
	 * the Tx. This ordering should be maintained so that the respective
	 * queues (Rx or Tx) will be together in the fastpath array and the
	 * associated ids will be sequential.
	 */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
		if (!fp->sb_info) {
			DP_NOTICE(edev, "sb info struct allocation failed\n");
			goto err;
		}

		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else if (fp_combined) {
			fp->type = QEDE_FASTPATH_COMBINED;
			fp_combined--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq = kzalloc(sizeof(*fp->txq), GFP_KERNEL);
			if (!fp->txq)
				goto err;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
			if (!fp->rxq)
				goto err;

			if (edev->xdp_prog) {
				fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
						     GFP_KERNEL);
				if (!fp->xdp_tx)
					goto err;
				fp->type |= QEDE_FASTPATH_XDP;
			}
		}
	}

	return 0;
err:
	qede_free_fp_array(edev);
	return -ENOMEM;
}

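/* Slowpath worker: performs deferred work items (Rx-mode reconfiguration,
 * VXLAN/GENEVE tunnel-port updates) under the qede lock, outside of the
 * fastpath.
 */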
static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     sp_task.work);
	struct qed_dev *cdev = edev->cdev;

	__qede_lock(edev);

	if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
		if (edev->state == QEDE_STATE_OPEN)
			qede_config_rx_mode(edev->ndev);

	if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
		struct qed_tunn_params tunn_params;

		memset(&tunn_params, 0, sizeof(tunn_params));
		tunn_params.update_vxlan_port = 1;
		tunn_params.vxlan_port = edev->vxlan_dst_port;
		qed_ops->tunn_config(cdev, &tunn_params);
	}

	if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
		struct qed_tunn_params tunn_params;

		memset(&tunn_params, 0, sizeof(tunn_params));
		tunn_params.update_geneve_port = 1;
		tunn_params.geneve_port = edev->geneve_dst_port;
		qed_ops->tunn_config(cdev, &tunn_params);
	}

	__qede_unlock(edev);
}

static void qede_update_pf_params(struct qed_dev *cdev)
{
	struct qed_pf_params pf_params;

	/* 64 rx + 64 tx + 64 XDP */
	memset(&pf_params, 0, sizeof(struct qed_pf_params));
	pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * 3;
	qed_ops->common->update_pf_params(cdev, &pf_params);
}

enum qede_probe_mode {
	QEDE_PROBE_NORMAL,
};

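/* Common probe flow for both PFs and VFs: probe the qed core device, start
 * the slowpath, query device information, then allocate, initialize and
 * register the netdev.
 */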
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
			bool is_vf, enum qede_probe_mode mode)
{
	struct qed_probe_params probe_params;
	struct qed_slowpath_params sp_params;
	struct qed_dev_eth_info dev_info;
	struct qede_dev *edev;
	struct qed_dev *cdev;
	int rc;

	if (unlikely(dp_level & QED_LEVEL_INFO))
		pr_notice("Starting qede probe\n");

	memset(&probe_params, 0, sizeof(probe_params));
	probe_params.protocol = QED_PROTOCOL_ETH;
	probe_params.dp_module = dp_module;
	probe_params.dp_level = dp_level;
	probe_params.is_vf = is_vf;
	cdev = qed_ops->common->probe(pdev, &probe_params);
	if (!cdev) {
		rc = -ENODEV;
		goto err0;
	}

	qede_update_pf_params(cdev);

	/* Start the Slowpath-process */
	memset(&sp_params, 0, sizeof(sp_params));
	sp_params.int_mode = QED_INT_MODE_MSIX;
	sp_params.drv_major = QEDE_MAJOR_VERSION;
	sp_params.drv_minor = QEDE_MINOR_VERSION;
	sp_params.drv_rev = QEDE_REVISION_VERSION;
	sp_params.drv_eng = QEDE_ENGINEERING_VERSION;
	strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
	rc = qed_ops->common->slowpath_start(cdev, &sp_params);
	if (rc) {
		pr_notice("Cannot start slowpath\n");
		goto err1;
	}

	/* Learn information crucial for qede to progress */
	rc = qed_ops->fill_dev_info(cdev, &dev_info);
	if (rc)
		goto err2;

	edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
				   dp_level);
	if (!edev) {
		rc = -ENOMEM;
		goto err2;
	}

	if (is_vf)
		edev->flags |= QEDE_FLAG_IS_VF;

	qede_init_ndev(edev);

	rc = qede_roce_dev_add(edev);
	if (rc)
		goto err3;

	rc = register_netdev(edev->ndev);
	if (rc) {
		DP_NOTICE(edev, "Cannot register net-device\n");
		goto err4;
	}

	edev->ops->common->set_id(cdev, edev->ndev->name, DRV_MODULE_VERSION);

	edev->ops->register_ops(cdev, &qede_ll_ops, edev);

#ifdef CONFIG_DCB
	if (!IS_VF(edev))
		qede_set_dcbnl_ops(edev->ndev);
#endif

	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);
	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	DP_INFO(edev, "Ending successfully qede probe\n");

	return 0;

err4:
	qede_roce_dev_remove(edev);
err3:
	free_netdev(edev->ndev);
err2:
	qed_ops->common->slowpath_stop(cdev);
err1:
	qed_ops->common->remove(cdev);
err0:
	return rc;
}

static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	bool is_vf = false;
	u32 dp_module = 0;
	u8 dp_level = 0;

	switch ((enum qede_pci_private)id->driver_data) {
	case QEDE_PRIVATE_VF:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a VF\n");
		is_vf = true;
		break;
	default:
		if (debug & QED_LOG_VERBOSE_MASK)
			dev_err(&pdev->dev, "Probing a PF\n");
	}

	qede_config_debug(debug, &dp_module, &dp_level);

	return __qede_probe(pdev, dp_module, dp_level, is_vf,
			    QEDE_PROBE_NORMAL);
}

enum qede_remove_mode {
	QEDE_REMOVE_NORMAL,
};

static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct qede_dev *edev = netdev_priv(ndev);
	struct qed_dev *cdev = edev->cdev;

	DP_INFO(edev, "Starting qede_remove\n");

	cancel_delayed_work_sync(&edev->sp_task);

	unregister_netdev(ndev);

	qede_roce_dev_remove(edev);

	edev->ops->common->set_power_state(cdev, PCI_D0);

	pci_set_drvdata(pdev, NULL);

	/* Release edev's reference to XDP's bpf program, if one exists */
	if (edev->xdp_prog)
		bpf_prog_put(edev->xdp_prog);

	free_netdev(ndev);

	/* Use global ops since we've freed edev */
	qed_ops->common->slowpath_stop(cdev);
	if (system_state == SYSTEM_POWER_OFF)
		return;
	qed_ops->common->remove(cdev);

	dev_info(&pdev->dev, "Ending qede_remove successfully\n");
}

static void qede_remove(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

static void qede_shutdown(struct pci_dev *pdev)
{
	__qede_remove(pdev, QEDE_REMOVE_NORMAL);
}

/* -------------------------------------------------------------------------
 * START OF LOAD / UNLOAD
 * -------------------------------------------------------------------------
 */

static int qede_set_num_queues(struct qede_dev *edev)
{
	int rc;
	u16 rss_num;

	/* Setup queues according to possible resources */
	if (edev->req_queues)
		rss_num = edev->req_queues;
	else
		rss_num = netif_get_num_default_rss_queues() *
			  edev->dev_info.common.num_hwfns;

	rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);

	rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
	if (rc > 0) {
		/* Managed to request interrupts for our queues */
		edev->num_queues = rc;
		DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
			QEDE_QUEUE_CNT(edev), rss_num);
		rc = 0;
	}

	edev->fp_num_tx = edev->req_num_tx;
	edev->fp_num_rx = edev->req_num_rx;

	return rc;
}

static void qede_free_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info)
{
	if (sb_info->sb_virt)
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
				  (void *)sb_info->sb_virt, sb_info->sb_phys);
}

/* This function allocates fast-path status block memory */
static int qede_alloc_mem_sb(struct qede_dev *edev,
			     struct qed_sb_info *sb_info, u16 sb_id)
{
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = dma_alloc_coherent(&edev->pdev->dev,
				     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = edev->ops->common->sb_init(edev->cdev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
				  sb_virt, sb_phys);
		return rc;
	}

	return 0;
}

static void qede_free_rx_buffers(struct qede_dev *edev,
				 struct qede_rx_queue *rxq)
{
	u16 i;

	for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
		struct sw_rx_data *rx_buf;
		struct page *data;

		rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
		data = rx_buf->data;

		dma_unmap_page(&edev->pdev->dev,
			       rx_buf->mapping, PAGE_SIZE, rxq->data_direction);

		rx_buf->data = NULL;
		__free_page(data);
	}
}

static void qede_free_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i;

	if (edev->gro_disable)
		return;

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->buffer;

		if (replace_buf->data) {
			dma_unmap_page(&edev->pdev->dev,
				       replace_buf->mapping,
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(replace_buf->data);
		}
	}
}

static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	qede_free_sge_mem(edev, rxq);

	/* Free rx buffers */
	qede_free_rx_buffers(edev, rxq);

	/* Free the parallel SW ring */
	kfree(rxq->sw_rx_ring);

	/* Free the real RQ ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
	edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
}

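/* Pre-allocate the replacement buffers used by TPA (HW GRO) aggregations.
 * Aggregation is disabled entirely when an XDP program is attached or the
 * MTU exceeds a page.
 */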
static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	dma_addr_t mapping;
	int i;

	/* Don't perform FW aggregations in case of XDP */
	if (edev->xdp_prog)
		edev->gro_disable = 1;

	if (edev->gro_disable)
		return 0;

	if (edev->ndev->mtu > PAGE_SIZE) {
		edev->gro_disable = 1;
		return 0;
	}

	for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
		struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
		struct sw_rx_data *replace_buf = &tpa_info->buffer;

		replace_buf->data = alloc_pages(GFP_ATOMIC, 0);
		if (unlikely(!replace_buf->data)) {
			DP_NOTICE(edev,
				  "Failed to allocate TPA skb pool [replacement buffer]\n");
			goto err;
		}

		mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0,
				       PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
			DP_NOTICE(edev,
				  "Failed to map TPA replacement buffer\n");
			goto err;
		}

		replace_buf->mapping = mapping;
		tpa_info->buffer.page_offset = 0;
		tpa_info->buffer_mapping = mapping;
		tpa_info->state = QEDE_AGG_STATE_NONE;
	}

	return 0;
err:
	qede_free_sge_mem(edev, rxq);
	edev->gro_disable = 1;
	return -ENOMEM;
}

/* This function allocates all memory needed per Rx queue */
static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	int i, rc, size;

	rxq->num_rx_buffers = edev->q_num_rx_buffers;

	rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;

	if (rxq->rx_buf_size > PAGE_SIZE)
		rxq->rx_buf_size = PAGE_SIZE;

	/* Segment size to split a page into multiple equal parts,
	 * unless XDP is used in which case we'd use the entire page.
	 */
	if (!edev->xdp_prog)
		rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size);
	else
		rxq->rx_buf_seg_size = PAGE_SIZE;

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
	rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
	if (!rxq->sw_rx_ring) {
		DP_ERR(edev, "Rx buffers ring allocation failed\n");
		rc = -ENOMEM;
		goto err;
	}

	/* Allocate FW Rx ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_NEXT_PTR,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring);

	if (rc)
		goto err;

	/* Allocate FW completion ring */
	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    RX_RING_SIZE,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring);
	if (rc)
		goto err;

	/* Allocate buffers for the Rx ring */
	rxq->filled_buffers = 0;
	for (i = 0; i < rxq->num_rx_buffers; i++) {
		rc = qede_alloc_rx_buffer(rxq, false);
		if (rc) {
			DP_ERR(edev,
			       "Rx buffers allocation failed at index %d\n", i);
			goto err;
		}
	}

	rc = qede_alloc_sge_mem(edev, rxq);
err:
	return rc;
}

static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	/* Free the parallel SW ring */
	if (txq->is_xdp)
		kfree(txq->sw_tx_ring.pages);
	else
		kfree(txq->sw_tx_ring.skbs);

	/* Free the real Tx ring used by FW */
	edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
}

/* This function allocates all memory needed per Tx queue */
static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	union eth_tx_bd_types *p_virt;
	int size, rc;

	txq->num_tx_buffers = edev->q_num_tx_buffers;

	/* Allocate the parallel driver ring for Tx buffers */
	if (txq->is_xdp) {
		size = sizeof(*txq->sw_tx_ring.pages) * TX_RING_SIZE;
		txq->sw_tx_ring.pages = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.pages)
			goto err;
	} else {
		size = sizeof(*txq->sw_tx_ring.skbs) * TX_RING_SIZE;
		txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
		if (!txq->sw_tx_ring.skbs)
			goto err;
	}

	rc = edev->ops->common->chain_alloc(edev->cdev,
					    QED_CHAIN_USE_TO_CONSUME_PRODUCE,
					    QED_CHAIN_MODE_PBL,
					    QED_CHAIN_CNT_TYPE_U16,
					    TX_RING_SIZE,
					    sizeof(*p_virt), &txq->tx_pbl);
	if (rc)
		goto err;

	return 0;

err:
	qede_free_mem_txq(edev, txq);
	return -ENOMEM;
}

/* This function frees all memory of a single fp */
static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	qede_free_mem_sb(edev, fp->sb_info);

	if (fp->type & QEDE_FASTPATH_RX)
		qede_free_mem_rxq(edev, fp->rxq);

	if (fp->type & QEDE_FASTPATH_TX)
		qede_free_mem_txq(edev, fp->txq);
}

/* This function allocates all memory needed for a single fp (i.e. an entity
 * which contains a status block, one Rx queue and/or multiple per-TC Tx
 * queues).
 */
static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
{
	int rc = 0;

	rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
	if (rc)
		goto out;

	if (fp->type & QEDE_FASTPATH_RX) {
		rc = qede_alloc_mem_rxq(edev, fp->rxq);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_XDP) {
		rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
		if (rc)
			goto out;
	}

	if (fp->type & QEDE_FASTPATH_TX) {
		rc = qede_alloc_mem_txq(edev, fp->txq);
		if (rc)
			goto out;
	}

out:
	return rc;
}

static void qede_free_mem_load(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		qede_free_mem_fp(edev, fp);
	}
}

/* This function allocates all qede memory at NIC load. */
static int qede_alloc_mem_load(struct qede_dev *edev)
{
	int rc = 0, queue_id;

	for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
		struct qede_fastpath *fp = &edev->fp_array[queue_id];

		rc = qede_alloc_mem_fp(edev, fp);
		if (rc) {
			DP_ERR(edev,
			       "Failed to allocate memory for fastpath - rss id = %d\n",
			       queue_id);
			qede_free_mem_load(edev);
			return rc;
		}
	}

	return 0;
}

/* This function initializes the fastpath contents and resets the SB, RXQ
 * and TXQ structures.
 */
static void qede_init_fp(struct qede_dev *edev)
{
	int queue_id, rxq_index = 0, txq_index = 0;
	struct qede_fastpath *fp;

	for_each_queue(queue_id) {
		fp = &edev->fp_array[queue_id];

		fp->edev = edev;
		fp->id = queue_id;

		if (fp->type & QEDE_FASTPATH_XDP) {
			fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
								rxq_index);
			fp->xdp_tx->is_xdp = 1;
		}

		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq->rxq_id = rxq_index++;

			/* Determine how to map buffers for this queue */
			if (fp->type & QEDE_FASTPATH_XDP)
				fp->rxq->data_direction = DMA_BIDIRECTIONAL;
			else
				fp->rxq->data_direction = DMA_FROM_DEVICE;
			fp->rxq->dev = &edev->pdev->dev;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			fp->txq->index = txq_index++;
			if (edev->dev_info.is_legacy)
				fp->txq->is_legacy = 1;
			fp->txq->dev = &edev->pdev->dev;
		}

		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 edev->ndev->name, queue_id);
	}

	edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO);
}

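/* Publish the actual number of Tx/Rx queues in use to the networking core. */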
static int qede_set_real_num_queues(struct qede_dev *edev)
{
	int rc = 0;

	rc = netif_set_real_num_tx_queues(edev->ndev, QEDE_TSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
		return rc;
	}

	rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
	if (rc) {
		DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
		return rc;
	}

	return 0;
}

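/* Disable NAPI polling and remove the per-queue NAPI instances. */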
static void qede_napi_disable_remove(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);

		netif_napi_del(&edev->fp_array[i].napi);
	}
}

static void qede_napi_add_enable(struct qede_dev *edev)
{
	int i;

	/* Add NAPI objects */
	for_each_queue(i) {
		netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
			       qede_poll, NAPI_POLL_WEIGHT);
		napi_enable(&edev->fp_array[i].napi);
	}
}

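/* Quiesce and release the fastpath IRQs - MSI-X vectors are synchronized
 * and freed, while simd handlers are cleaned via the qed callback.
 */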
static void qede_sync_free_irqs(struct qede_dev *edev)
{
	int i;

	for (i = 0; i < edev->int_info.used_cnt; i++) {
		if (edev->int_info.msix_cnt) {
			synchronize_irq(edev->int_info.msix[i].vector);
			free_irq(edev->int_info.msix[i].vector,
				 &edev->fp_array[i]);
		} else {
			edev->ops->common->simd_handler_clean(edev->cdev, i);
		}
	}

	edev->int_info.used_cnt = 0;
}

static int qede_req_msix_irqs(struct qede_dev *edev)
{
	int i, rc;

	/* Sanity check - the number of prepared queues must not exceed
	 * the number of available MSI-X vectors.
	 */
	if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
		DP_ERR(edev,
		       "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
		       QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
		return -EINVAL;
	}

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		rc = request_irq(edev->int_info.msix[i].vector,
				 qede_msix_fp_int, 0, edev->fp_array[i].name,
				 &edev->fp_array[i]);
		if (rc) {
			DP_ERR(edev, "Request fp %d irq failed\n", i);
			qede_sync_free_irqs(edev);
			return rc;
		}
		DP_VERBOSE(edev, NETIF_MSG_INTR,
			   "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
			   edev->fp_array[i].name, i,
			   &edev->fp_array[i]);
		edev->int_info.used_cnt++;
	}

	return 0;
}

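/* Used when MSI-X isn't available - qed invokes this callback instead of a
 * hardware interrupt, and it merely schedules NAPI for the fastpath cookie.
 */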
static void qede_simd_fp_handler(void *cookie)
{
	struct qede_fastpath *fp = (struct qede_fastpath *)cookie;

	napi_schedule_irqoff(&fp->napi);
}

static int qede_setup_irqs(struct qede_dev *edev)
{
	int i, rc = 0;

	/* Learn Interrupt configuration */
	rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
	if (rc)
		return rc;

	if (edev->int_info.msix_cnt) {
		rc = qede_req_msix_irqs(edev);
		if (rc)
			return rc;
		edev->ndev->irq = edev->int_info.msix[0].vector;
	} else {
		const struct qed_common_ops *ops;

		/* qed should learn the RSS ids and their callbacks */
		ops = edev->ops->common;
		for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
			ops->simd_handler_config(edev->cdev,
						 &edev->fp_array[i], i,
						 qede_simd_fp_handler);
		edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
	}
	return 0;
}

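/* Busy-wait until the Tx ring empties. If it's still stuck after ~1000
 * polling iterations and @allow_drain is set, ask the MCP to drain the
 * queue and then retry once without the drain option.
 */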
static int qede_drain_txq(struct qede_dev *edev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev,
					  "Tx queue[%d] is stuck, requesting MCP to drain\n",
					  txq->index);
				rc = edev->ops->common->drain(edev->cdev);
				if (rc)
					return rc;
				return qede_drain_txq(edev, txq, false);
			}
			DP_NOTICE(edev,
				  "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
				  txq->index, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		usleep_range(1000, 2000);
		barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	usleep_range(1000, 2000);

	return 0;
}

static int qede_stop_txq(struct qede_dev *edev,
			 struct qede_tx_queue *txq, int rss_id)
{
	return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
}

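/* Tear down the datapath in an orderly fashion: deactivate the vport so no
 * new traffic arrives, drain the Tx/XDP queues, stop all queues in reverse
 * order and finally stop the vport itself.
 */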
static int qede_stop_queues(struct qede_dev *edev)
{
	struct qed_update_vport_params *vport_update_params;
	struct qed_dev *cdev = edev->cdev;
	struct qede_fastpath *fp;
	int rc, i;

	/* Disable the vport */
	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	vport_update_params->vport_id = 0;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 0;
	vport_update_params->update_rss_flg = 0;

	rc = edev->ops->vport_update(cdev, vport_update_params);
	vfree(vport_update_params);

	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_drain_txq(edev, fp->txq, true);
			if (rc)
				return rc;
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_drain_txq(edev, fp->xdp_tx, true);
			if (rc)
				return rc;
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
		fp = &edev->fp_array[i];

		/* Stop the Tx Queue(s) */
		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_stop_txq(edev, fp->txq, i);
			if (rc)
				return rc;
		}

		/* Stop the Rx Queue */
		if (fp->type & QEDE_FASTPATH_RX) {
			rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}

		/* Stop the XDP forwarding queue */
		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_stop_txq(edev, fp->xdp_tx, i);
			if (rc)
				return rc;

			bpf_prog_put(fp->rxq->xdp_prog);
		}
	}

	/* Stop the vport */
	rc = edev->ops->vport_stop(cdev, 0);
	if (rc)
		DP_ERR(edev, "Failed to stop VPORT\n");

	return rc;
}

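/* Start a Tx (or XDP forwarding) queue in FW/HW and prepare its doorbell
 * data; the Tx path later publishes new producers by writing txq->tx_db.raw
 * to the returned txq->doorbell_addr.
 */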
static int qede_start_txq(struct qede_dev *edev,
			  struct qede_fastpath *fp,
			  struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
{
	dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
	u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
	struct qed_queue_start_common_params params;
	struct qed_txq_start_ret_params ret_params;
	int rc;

	memset(&params, 0, sizeof(params));
	memset(&ret_params, 0, sizeof(ret_params));

	/* Let the XDP queue share the queue-zone with one of the regular
	 * txqs. We don't really care about its coalescing.
	 */
	if (txq->is_xdp)
		params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
	else
		params.queue_id = txq->index;

	params.sb = fp->sb_info->igu_sb_id;
	params.sb_idx = sb_idx;

	rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
				   page_cnt, &ret_params);
	if (rc) {
		DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
		return rc;
	}

	txq->doorbell_addr = ret_params.p_doorbell;
	txq->handle = ret_params.p_handle;

	/* Determine the FW consumer address associated with this queue */
	txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];

	/* Prepare the doorbell parameters */
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_ETH_TX_BD_PROD_CMD);
	txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;

	return rc;
}

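/* Bring up the vport and start the Rx/Tx/XDP queues of every fastpath,
 * then send a vport-update to activate it with the current RSS and
 * tx-switching configuration.
 */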
static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
	int vlan_removal_en = 1;
	struct qed_dev *cdev = edev->cdev;
	struct qed_dev_info *qed_info = &edev->dev_info.common;
	struct qed_update_vport_params *vport_update_params;
	struct qed_queue_start_common_params q_params;
	struct qed_start_vport_params start = {0};
	int rc, i;

	if (!edev->num_queues) {
		DP_ERR(edev,
		       "Cannot update V-PORT to active as there are no Rx queues\n");
		return -EINVAL;
	}

	vport_update_params = vzalloc(sizeof(*vport_update_params));
	if (!vport_update_params)
		return -ENOMEM;

	start.gro_enable = !edev->gro_disable;
	start.mtu = edev->ndev->mtu;
	start.vport_id = 0;
	start.drop_ttl0 = true;
	start.remove_inner_vlan = vlan_removal_en;
	start.clear_stats = clear_stats;

	rc = edev->ops->vport_start(cdev, &start);

	if (rc) {
		DP_ERR(edev, "Start V-PORT failed %d\n", rc);
		goto out;
	}

	DP_VERBOSE(edev, NETIF_MSG_IFUP,
		   "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
		   start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];
		dma_addr_t p_phys_table;
		u32 page_cnt;

		if (fp->type & QEDE_FASTPATH_RX) {
			struct qed_rxq_start_ret_params ret_params;
			struct qede_rx_queue *rxq = fp->rxq;
			__le16 *val;

			memset(&ret_params, 0, sizeof(ret_params));
			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = rxq->rxq_id;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = RX_PI;

			p_phys_table =
			    qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
			page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);

			rc = edev->ops->q_rx_start(cdev, i, &q_params,
						   rxq->rx_buf_size,
						   rxq->rx_bd_ring.p_phys_addr,
						   p_phys_table,
						   page_cnt, &ret_params);
			if (rc) {
				DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
				       rc);
				goto out;
			}

			/* Use the return parameters */
			rxq->hw_rxq_prod_addr = ret_params.p_prod;
			rxq->handle = ret_params.p_handle;

			val = &fp->sb_info->sb_virt->pi_array[RX_PI];
			rxq->hw_cons_ptr = val;

			qede_update_rx_prod(edev, rxq);
		}

		if (fp->type & QEDE_FASTPATH_XDP) {
			rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
			if (rc)
				goto out;

			fp->rxq->xdp_prog = bpf_prog_add(edev->xdp_prog, 1);
			if (IS_ERR(fp->rxq->xdp_prog)) {
				rc = PTR_ERR(fp->rxq->xdp_prog);
				fp->rxq->xdp_prog = NULL;
				goto out;
			}
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			rc = qede_start_txq(edev, fp, fp->txq, i, TX_PI(0));
			if (rc)
				goto out;
		}
	}

	/* Prepare and send the vport enable */
	vport_update_params->vport_id = start.vport_id;
	vport_update_params->update_vport_active_flg = 1;
	vport_update_params->vport_active_flg = 1;

	if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
	    qed_info->tx_switching) {
		vport_update_params->update_tx_switching_flg = 1;
		vport_update_params->tx_switching_flg = 1;
	}

	qede_fill_rss_params(edev, &vport_update_params->rss_params,
			     &vport_update_params->update_rss_flg);

	rc = edev->ops->vport_update(cdev, vport_update_params);
	if (rc)
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);

out:
	vfree(vport_update_params);
	return rc;
}

enum qede_unload_mode {
	QEDE_UNLOAD_NORMAL,
};

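/* Stop the interface: notify RoCE, disable Tx and carrier, reset the link,
 * stop the queues, release the IRQs and free all fastpath memory.
 */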
static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
			bool is_locked)
{
	struct qed_link_params link_params;
	int rc;

	DP_INFO(edev, "Starting qede unload\n");

	if (!is_locked)
		__qede_lock(edev);

	qede_roce_dev_event_close(edev);
	edev->state = QEDE_STATE_CLOSED;

	/* Close OS Tx */
	netif_tx_disable(edev->ndev);
	netif_carrier_off(edev->ndev);

	/* Reset the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	rc = qede_stop_queues(edev);
	if (rc) {
		qede_sync_free_irqs(edev);
		goto out;
	}

	DP_INFO(edev, "Stopped Queues\n");

	qede_vlan_mark_nonconfigured(edev);
	edev->ops->fastpath_stop(edev->cdev);

	/* Release the interrupts */
	qede_sync_free_irqs(edev);
	edev->ops->common->set_fp_int(edev->cdev, 0);

	qede_napi_disable_remove(edev);

	qede_free_mem_load(edev);
	qede_free_fp_array(edev);

out:
	if (!is_locked)
		__qede_unlock(edev);
	DP_INFO(edev, "Ending qede unload\n");
}

enum qede_load_mode {
	QEDE_LOAD_NORMAL,
	QEDE_LOAD_RELOAD,
};

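/* Bring the interface up: allocate and initialize the fastpath structures
 * and memory, request the IRQs, start the queues and vport, and finally
 * request link-up using the current configuration.
 */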
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
		     bool is_locked)
{
	struct qed_link_params link_params;
	struct qed_link_output link_output;
	int rc;

	DP_INFO(edev, "Starting qede load\n");

	if (!is_locked)
		__qede_lock(edev);

	rc = qede_set_num_queues(edev);
	if (rc)
		goto out;

	rc = qede_alloc_fp_array(edev);
	if (rc)
		goto out;

	qede_init_fp(edev);

	rc = qede_alloc_mem_load(edev);
	if (rc)
		goto err1;
	DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
		QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));

	rc = qede_set_real_num_queues(edev);
	if (rc)
		goto err2;

	qede_napi_add_enable(edev);
	DP_INFO(edev, "Napi added and enabled\n");

	rc = qede_setup_irqs(edev);
	if (rc)
		goto err3;
	DP_INFO(edev, "Setup IRQs succeeded\n");

	rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
	if (rc)
		goto err4;
	DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");

	/* Add primary mac and set Rx filters */
	ether_addr_copy(edev->primary_mac, edev->ndev->dev_addr);

	/* Program un-configured VLANs */
	qede_configure_vlan_filters(edev);

	/* Ask for link-up using current configuration */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Query whether link is already-up */
	memset(&link_output, 0, sizeof(link_output));
	edev->ops->common->get_link(edev->cdev, &link_output);
	qede_roce_dev_event_open(edev);
	qede_link_update(edev, &link_output);

	edev->state = QEDE_STATE_OPEN;

	DP_INFO(edev, "Ending successfully qede load\n");

	goto out;
err4:
	qede_sync_free_irqs(edev);
	memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info));
err3:
	qede_napi_disable_remove(edev);
err2:
	qede_free_mem_load(edev);
err1:
	edev->ops->common->set_fp_int(edev->cdev, 0);
	qede_free_fp_array(edev);
	edev->num_queues = 0;
	edev->fp_num_tx = 0;
	edev->fp_num_rx = 0;
out:
	if (!is_locked)
		__qede_unlock(edev);

	return rc;
}

/* args->func is called between the unload and the reload when the interface
 * is actually running, or on its own in case the interface is currently DOWN.
 */
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked)
{
	if (!is_locked)
		__qede_lock(edev);

	/* Since qede_lock is held, internal state wouldn't change even
	 * if netdev state would start transitioning. Check whether current
	 * internal configuration indicates device is up, then reload.
	 */
	if (edev->state == QEDE_STATE_OPEN) {
		qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
		if (args)
			args->func(edev, args);
		qede_load(edev, QEDE_LOAD_RELOAD, true);

		/* Since no one is going to do it for us, re-configure */
		qede_config_rx_mode(edev->ndev);
	} else if (args) {
		args->func(edev, args);
	}

	if (!is_locked)
		__qede_unlock(edev);
}

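/* A minimal usage sketch, for illustration only - this assumes the
 * qede_reload_args layout with a .func callback and a union carrying its
 * argument, as used by the MTU-change path in this driver:
 *
 *	struct qede_reload_args args;
 *
 *	args.u.mtu = new_mtu;
 *	args.func = &qede_update_mtu;
 *	qede_reload(edev, &args, false);
 */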
/* called with rtnl_lock */
static int qede_open(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);
	int rc;

	netif_carrier_off(ndev);

	edev->ops->common->set_power_state(edev->cdev, PCI_D0);

	rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
	if (rc)
		return rc;

	udp_tunnel_get_rx_info(ndev);

	edev->ops->common->update_drv_state(edev->cdev, true);

	return 0;
}

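/* called with rtnl_lock, like qede_open() above */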
static int qede_close(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	qede_unload(edev, QEDE_UNLOAD_NORMAL, false);

	edev->ops->common->update_drv_state(edev->cdev, false);

	return 0;
}

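/* Reflect a link change reported by qed on the netdevice - start or stop
 * the Tx queues and update the carrier state accordingly.
 */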
static void qede_link_update(void *dev, struct qed_link_output *link)
{
	struct qede_dev *edev = dev;

	if (!netif_running(edev->ndev)) {
		DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n");
		return;
	}

	if (link->link_up) {
		if (!netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is up\n");
			netif_tx_start_all_queues(edev->ndev);
			netif_carrier_on(edev->ndev);
		}
	} else {
		if (netif_carrier_ok(edev->ndev)) {
			DP_NOTICE(edev, "Link is down\n");
			netif_tx_disable(edev->ndev);
			netif_carrier_off(edev->ndev);
		}
	}
}