/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

Ajit Khaparde7c185272010-07-29 06:16:33 +0000101/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -0700102static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +0000103 "LPCMEMHOST",
104 "MGMT_MAC",
105 "PCS0ONLINE",
106 "MPU_IRAM",
107 "PCS1ONLINE",
108 "PCTL0",
109 "PCTL1",
110 "PMEM",
111 "RR",
112 "TXPB",
113 "RXPP",
114 "XAUI",
115 "TXP",
116 "ARM",
117 "IPC",
118 "HOST2",
119 "HOST3",
120 "HOST4",
121 "HOST5",
122 "HOST6",
123 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530124 "ECRC",
125 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700126 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530127 "PERIPH",
128 "LLTXULP",
129 "D2P",
130 "RCON",
131 "LDMA",
132 "LLTXP",
133 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000134 "Unknown"
135};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

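/* Frees the DMA-coherent memory backing a queue, if any was allocated;
 * safe to call for a queue that was never allocated.
 */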
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

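/* Toggles the HOSTINTR bit in the MEMBAR interrupt-control register via
 * PCI config space; used as a fallback when the INTR_SET FW cmd fails.
 */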
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On Lancer, interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

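/* The be_*_notify() routines below ring doorbell registers to tell the
 * adapter about newly posted RX buffers/TX WRBs or processed EQ/CQ
 * entries. Doorbell writes are skipped once a HW error is detected.
 */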
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

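/* ndo_set_mac_address handler: programs the user-provided MAC on the
 * interface and confirms via a FW query that it was activated. If the
 * device is not running, the MAC is only recorded in netdev->dev_addr.
 */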
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

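/* Extends a 16-bit HW counter that wraps at 65535 into a running 32-bit
 * SW value: the low 16 bits track the last HW snapshot and each detected
 * wrap adds 65536.
 */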
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)	(x & 0xFFFF)
#define hi(x)	(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

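/* ndo_get_stats64 handler: aggregates the SW per-queue RX/TX counters
 * (under u64_stats retry loops) and folds in the HW error counters
 * cached in adapter->drv_stats by be_parse_stats().
 */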
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

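/* Length of the headers (up to and including the TCP header, or the
 * inner TCP header for encapsulated pkts) that HW replicates in every
 * segment of a TSO pkt.
 */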
static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

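/* Fills a WRB fragment descriptor with the DMA address and length of a
 * TX buffer; the 64-bit address is split into hi/lo 32-bit LE words.
 */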
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

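/* Returns the vlan tag to be placed on the wire for the skb: if the
 * OS-supplied priority is not available in the adapter's priority bmap,
 * it is replaced with the FW-recommended priority bits.
 */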
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

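/* TXQ occupancy helpers: the queue is treated as full when posting one
 * more max-fragment pkt could overflow it, and may be woken once it has
 * drained below half of its length.
 */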
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

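/* Inserts the vlan tag (and the outer QnQ tag, if configured) into the
 * pkt in SW and sets the VLAN_SKIP_HW wrb bit so that HW tagging is
 * skipped; used by the vlan-related HW workarounds below.
 */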
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

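/* Returns true for IPv6 pkts carrying the extension-header pattern
 * (hdrlen == 0xff) that can stall the HW when vlan tagging is requested;
 * see the be_ipv6_tx_stall_chk() callers below.
 */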
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

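/* Applies SW workarounds for known HW issues with padded, pvid-tagged
 * and certain ipv6 pkts before the skb is handed to HW. Returns the
 * (possibly modified) skb, or NULL if the pkt had to be dropped.
 */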
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301117static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1118 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301119 struct be_wrb_params
1120 *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001121{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001122 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001123 unsigned int eth_hdr_len;
1124 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001125
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001126 /* For padded packets, BE HW modifies tot_len field in IP header
1127 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001128 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001129 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001130 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1131 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001132 if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001133 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001134 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001135 ip = (struct iphdr *)ip_hdr(skb);
1136 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1137 }
1138
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001139 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301140 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001141 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301142 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001143 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301144 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001145
Somnath Kotur93040ae2012-06-26 22:32:10 +00001146	/* HW has a bug wherein it will calculate CSUM for VLAN
1147	 * pkts even though CSUM offload is disabled.
1148	 * Manually insert the VLAN tag in such pkts.
1149	 */
1150 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001151 skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301152 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001153 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301154 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001155 }
1156
1157	/* HW may lock up when VLAN HW tagging is requested on
1158 * certain ipv6 packets. Drop such pkts if the HW workaround to
1159 * skip HW tagging is not enabled by FW.
1160 */
1161 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301162 (adapter->pvid || adapter->qnq_vid) &&
1163 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001164 goto tx_drop;
1165
1166	/* Insert the VLAN tag manually to prevent an
1167	 * ASIC lockup when the ASIC inserts a VLAN tag into
1168	 * certain ipv6 packets. Insert VLAN tags in the driver,
1169	 * and set the event, completion and vlan bits accordingly
1170	 * in the Tx WRB.
1171	 */
1172 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1173 be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301174 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001175 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301176 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001177 }
1178
Sathya Perlaee9c7992013-05-22 23:04:55 +00001179 return skb;
1180tx_drop:
1181 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301182err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001183 return NULL;
1184}
1185
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301186static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1187 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301188 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301189{
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301190 int err;
1191
Suresh Reddy8227e992015-10-12 03:47:19 -04001192 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1193	 * packets that are 32 bytes or less may cause a transmit stall
1194	 * on that port. The workaround is to pad such packets
1195	 * (len <= 32 bytes) to a minimum length of 36 bytes.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301196 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001197 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001198 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301199 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301200 }
1201
1202 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301203 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301204 if (!skb)
1205 return NULL;
1206 }
1207
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301208 /* The stack can send us skbs with length greater than
1209 * what the HW can handle. Trim the extra bytes.
1210 */
1211 WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1212 err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1213 WARN_ON(err);
1214
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301215 return skb;
1216}
1217
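/* Flushes all WRBs queued so far: ensures the last hdr WRB requests an
 * event, pads the queue with a dummy WRB when a non-Lancer chip has an
 * odd number of pending WRBs, and then rings the Tx doorbell.
 */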
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001218static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1219{
1220 struct be_queue_info *txq = &txo->q;
1221 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1222
1223 /* Mark the last request eventable if it hasn't been marked already */
1224 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1225 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1226
1227	/* compose a dummy wrb if there is an odd number of wrbs to notify */
1228 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001229 wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001230 queue_head_inc(txq);
1231 atomic_inc(&txq->used);
1232 txo->pend_wrb_cnt++;
1233 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1234 TX_HDR_WRB_NUM_SHIFT);
1235 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1236 TX_HDR_WRB_NUM_SHIFT);
1237 }
1238 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1239 txo->pend_wrb_cnt = 0;
1240}
1241
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301242/* OS2BMC related */
1243
1244#define DHCP_CLIENT_PORT 68
1245#define DHCP_SERVER_PORT 67
1246#define NET_BIOS_PORT1 137
1247#define NET_BIOS_PORT2 138
1248#define DHCPV6_RAS_PORT 547
1249
1250#define is_mc_allowed_on_bmc(adapter, eh) \
1251 (!is_multicast_filt_enabled(adapter) && \
1252 is_multicast_ether_addr(eh->h_dest) && \
1253 !is_broadcast_ether_addr(eh->h_dest))
1254
1255#define is_bc_allowed_on_bmc(adapter, eh) \
1256 (!is_broadcast_filt_enabled(adapter) && \
1257 is_broadcast_ether_addr(eh->h_dest))
1258
1259#define is_arp_allowed_on_bmc(adapter, skb) \
1260 (is_arp(skb) && is_arp_filt_enabled(adapter))
1261
1262#define is_broadcast_packet(eh, adapter) \
1263 (is_multicast_ether_addr(eh->h_dest) && \
1264 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
1265
1266#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1267
1268#define is_arp_filt_enabled(adapter) \
1269 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1270
1271#define is_dhcp_client_filt_enabled(adapter) \
1272 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1273
1274#define is_dhcp_srvr_filt_enabled(adapter) \
1275 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1276
1277#define is_nbios_filt_enabled(adapter) \
1278 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1279
1280#define is_ipv6_na_filt_enabled(adapter) \
1281 (adapter->bmc_filt_mask & \
1282 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1283
1284#define is_ipv6_ra_filt_enabled(adapter) \
1285 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1286
1287#define is_ipv6_ras_filt_enabled(adapter) \
1288 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1289
1290#define is_broadcast_filt_enabled(adapter) \
1291 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1292
1293#define is_multicast_filt_enabled(adapter) \
1294 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1295
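/* Returns true if a copy of this pkt must also be sent to the BMC.
 * The decision is driven by adapter->bmc_filt_mask: bcast/mcast pkts are
 * forwarded unless their filter is enabled, while ARP, DHCP, NetBIOS and
 * IPv6 RA/NA/RAS pkts are forwarded only when their filter bit is set.
 */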
1296static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1297 struct sk_buff **skb)
1298{
1299 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1300 bool os2bmc = false;
1301
1302 if (!be_is_os2bmc_enabled(adapter))
1303 goto done;
1304
1305 if (!is_multicast_ether_addr(eh->h_dest))
1306 goto done;
1307
1308 if (is_mc_allowed_on_bmc(adapter, eh) ||
1309 is_bc_allowed_on_bmc(adapter, eh) ||
1310 is_arp_allowed_on_bmc(adapter, (*skb))) {
1311 os2bmc = true;
1312 goto done;
1313 }
1314
1315 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1316 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1317 u8 nexthdr = hdr->nexthdr;
1318
1319 if (nexthdr == IPPROTO_ICMPV6) {
1320 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1321
1322 switch (icmp6->icmp6_type) {
1323 case NDISC_ROUTER_ADVERTISEMENT:
1324 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1325 goto done;
1326 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1327 os2bmc = is_ipv6_na_filt_enabled(adapter);
1328 goto done;
1329 default:
1330 break;
1331 }
1332 }
1333 }
1334
1335 if (is_udp_pkt((*skb))) {
1336 struct udphdr *udp = udp_hdr((*skb));
1337
Venkat Duvvuru1645d992015-07-10 05:32:47 -04001338 switch (ntohs(udp->dest)) {
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301339 case DHCP_CLIENT_PORT:
1340 os2bmc = is_dhcp_client_filt_enabled(adapter);
1341 goto done;
1342 case DHCP_SERVER_PORT:
1343 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1344 goto done;
1345 case NET_BIOS_PORT1:
1346 case NET_BIOS_PORT2:
1347 os2bmc = is_nbios_filt_enabled(adapter);
1348 goto done;
1349 case DHCPV6_RAS_PORT:
1350 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1351 goto done;
1352 default:
1353 break;
1354 }
1355 }
1356done:
1357	/* For packets over a vlan that are destined to the BMC,
1358	 * the ASIC expects the vlan tag to be inline in the packet.
1359	 */
1360 if (os2bmc)
1361 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1362
1363 return os2bmc;
1364}
1365
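/* Main Tx entry point: applies the HW workarounds, enqueues the skb as
 * WRBs (a second time, with the mgmt bit set, when the pkt must also
 * reach the BMC), stops the subqueue when the Tx ring is full and rings
 * the doorbell when xmit_more indicates the end of a batch.
 */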
Sathya Perlaee9c7992013-05-22 23:04:55 +00001366static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1367{
1368 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001369 u16 q_idx = skb_get_queue_mapping(skb);
1370 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301371 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301372 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001373 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001374
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301375 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001376 if (unlikely(!skb))
1377 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001378
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301379 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1380
1381 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001382 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001383 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001384 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001385 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001386
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301387	/* if os2bmc is enabled and the pkt is destined to the BMC,
1388 * enqueue the pkt a 2nd time with mgmt bit set.
1389 */
1390 if (be_send_pkt_to_bmc(adapter, &skb)) {
1391 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1392 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1393 if (unlikely(!wrb_cnt))
1394 goto drop;
1395 else
1396 skb_get(skb);
1397 }
1398
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301399 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001400 netif_stop_subqueue(netdev, q_idx);
1401 tx_stats(txo)->tx_stops++;
1402 }
1403
1404 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1405 be_xmit_flush(adapter, txo);
1406
1407 return NETDEV_TX_OK;
1408drop:
1409 tx_stats(txo)->tx_drv_drops++;
1410 /* Flush the already enqueued tx requests */
1411 if (flush && txo->pend_wrb_cnt)
1412 be_xmit_flush(adapter, txo);
1413
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001414 return NETDEV_TX_OK;
1415}
1416
1417static int be_change_mtu(struct net_device *netdev, int new_mtu)
1418{
1419 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301420 struct device *dev = &adapter->pdev->dev;
1421
1422 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1423 dev_info(dev, "MTU must be between %d and %d bytes\n",
1424 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425 return -EINVAL;
1426 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301427
1428 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301429 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001430 netdev->mtu = new_mtu;
1431 return 0;
1432}
1433
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001434static inline bool be_in_all_promisc(struct be_adapter *adapter)
1435{
1436 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1437 BE_IF_FLAGS_ALL_PROMISCUOUS;
1438}
1439
1440static int be_set_vlan_promisc(struct be_adapter *adapter)
1441{
1442 struct device *dev = &adapter->pdev->dev;
1443 int status;
1444
1445 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1446 return 0;
1447
1448 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1449 if (!status) {
1450 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1451 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1452 } else {
1453 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1454 }
1455 return status;
1456}
1457
1458static int be_clear_vlan_promisc(struct be_adapter *adapter)
1459{
1460 struct device *dev = &adapter->pdev->dev;
1461 int status;
1462
1463 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1464 if (!status) {
1465 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1466 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1467 }
1468 return status;
1469}
1470
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001472 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1473 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001474 */
Sathya Perla10329df2012-06-05 19:37:18 +00001475static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476{
Vasundhara Volam50762662014-09-12 17:39:14 +05301477 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001478 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301479 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001480 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001481
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001482 /* No need to change the VLAN state if the I/F is in promiscuous */
1483 if (adapter->netdev->flags & IFF_PROMISC)
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001484 return 0;
1485
Sathya Perla92bf14a2013-08-27 16:57:32 +05301486 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001487 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001488
Somnath Kotur841f60f2016-07-27 05:26:15 -04001489 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1490 status = be_clear_vlan_promisc(adapter);
1491 if (status)
1492 return status;
1493 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001494 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301495 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1496 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001497
Vasundhara Volam435452a2015-03-20 06:28:23 -04001498 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001499 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001500 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001501 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP77be8c12015-05-06 05:30:35 -04001502 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1503 addl_status(status) ==
Kalesh AP4c600052014-05-30 19:06:26 +05301504 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001505 return be_set_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001506 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001507 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001508}
1509
Patrick McHardy80d5c362013-04-19 02:04:28 +00001510static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001511{
1512 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001513 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001514
Sathya Perlab7172412016-07-27 05:26:18 -04001515 mutex_lock(&adapter->rx_filter_lock);
1516
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001517 /* Packets with VID 0 are always received by Lancer by default */
1518 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001519 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301520
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301521 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001522 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001523
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301524 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301525 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001526
Sathya Perlab7172412016-07-27 05:26:18 -04001527 status = be_vid_config(adapter);
1528done:
1529 mutex_unlock(&adapter->rx_filter_lock);
1530 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001531}
1532
Patrick McHardy80d5c362013-04-19 02:04:28 +00001533static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001534{
1535 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001536 int status = 0;
1537
1538 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001539
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001540 /* Packets with VID 0 are always received by Lancer by default */
1541 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001542 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001543
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301544 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001545 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301546
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301547 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301548 adapter->vlans_added--;
1549
Sathya Perlab7172412016-07-27 05:26:18 -04001550 status = be_vid_config(adapter);
1551done:
1552 mutex_unlock(&adapter->rx_filter_lock);
1553 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001554}
1555
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001556static void be_set_all_promisc(struct be_adapter *adapter)
1557{
1558 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1559 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1560}
1561
1562static void be_set_mc_promisc(struct be_adapter *adapter)
1563{
1564 int status;
1565
1566 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1567 return;
1568
1569 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1570 if (!status)
1571 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1572}
1573
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001574static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001575{
1576 int status;
1577
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001578 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1579 return;
1580
1581 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001582 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001583 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1584}
1585
1586static void be_clear_uc_promisc(struct be_adapter *adapter)
1587{
1588 int status;
1589
1590 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1591 return;
1592
1593 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1594 if (!status)
1595 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1596}
1597
1598/* The below 2 functions are the callback args for __dev_mc_sync()/__dev_uc_sync().
1599 * We use a single callback function for both sync and unsync. We don't really
1600 * add/remove addresses through this callback, but use it only to detect changes
1601 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1602 */
1603static int be_uc_list_update(struct net_device *netdev,
1604 const unsigned char *addr)
1605{
1606 struct be_adapter *adapter = netdev_priv(netdev);
1607
1608 adapter->update_uc_list = true;
1609 return 0;
1610}
1611
1612static int be_mc_list_update(struct net_device *netdev,
1613 const unsigned char *addr)
1614{
1615 struct be_adapter *adapter = netdev_priv(netdev);
1616
1617 adapter->update_mc_list = true;
1618 return 0;
1619}
1620
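/* Programs the HW multicast filter from the netdev mc list (cached in
 * adapter->mc_list) and falls back to mcast-promisc mode when
 * IFF_ALLMULTI is set or the list exceeds what the HW supports.
 */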
1621static void be_set_mc_list(struct be_adapter *adapter)
1622{
1623 struct net_device *netdev = adapter->netdev;
Sathya Perlab7172412016-07-27 05:26:18 -04001624 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001625 bool mc_promisc = false;
1626 int status;
1627
Sathya Perlab7172412016-07-27 05:26:18 -04001628 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001629 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1630
1631 if (netdev->flags & IFF_PROMISC) {
1632 adapter->update_mc_list = false;
1633 } else if (netdev->flags & IFF_ALLMULTI ||
1634 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1635 /* Enable multicast promisc if num configured exceeds
1636 * what we support
1637 */
1638 mc_promisc = true;
1639 adapter->update_mc_list = false;
1640 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1641 /* Update mc-list unconditionally if the iface was previously
1642 * in mc-promisc mode and now is out of that mode.
1643 */
1644 adapter->update_mc_list = true;
1645 }
1646
Sathya Perlab7172412016-07-27 05:26:18 -04001647 if (adapter->update_mc_list) {
1648 int i = 0;
1649
1650 /* cache the mc-list in adapter */
1651 netdev_for_each_mc_addr(ha, netdev) {
1652 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1653 i++;
1654 }
1655 adapter->mc_count = netdev_mc_count(netdev);
1656 }
1657 netif_addr_unlock_bh(netdev);
1658
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001659 if (mc_promisc) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001660 be_set_mc_promisc(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001661 } else if (adapter->update_mc_list) {
1662 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1663 if (!status)
1664 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1665 else
1666 be_set_mc_promisc(adapter);
1667
1668 adapter->update_mc_list = false;
1669 }
1670}
1671
1672static void be_clear_mc_list(struct be_adapter *adapter)
1673{
1674 struct net_device *netdev = adapter->netdev;
1675
1676 __dev_mc_unsync(netdev, NULL);
1677 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001678 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001679}
1680
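/* Adds the uc-list MAC at uc_idx to the HW. If it matches the adapter's
 * own MAC, reuse pmac_id[0] instead of consuming another MAC filter slot.
 */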
Suresh Reddy988d44b2016-09-07 19:57:52 +05301681static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1682{
Ivan Vecerab4c30222017-01-06 20:30:02 +01001683 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301684 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1685 return 0;
1686 }
1687
Ivan Vecerab4c30222017-01-06 20:30:02 +01001688 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
Suresh Reddy988d44b2016-09-07 19:57:52 +05301689 adapter->if_handle,
1690 &adapter->pmac_id[uc_idx + 1], 0);
1691}
1692
1693static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1694{
1695 if (pmac_id == adapter->pmac_id[0])
1696 return;
1697
1698 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1699}
1700
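/* Programs the HW unicast filter from the netdev uc list (cached in
 * adapter->uc_list) and falls back to uc-promisc mode when the list
 * exceeds the HW's uc-MAC capacity.
 */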
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001701static void be_set_uc_list(struct be_adapter *adapter)
1702{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001703 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001704 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001705 bool uc_promisc = false;
Sathya Perlab7172412016-07-27 05:26:18 -04001706 int curr_uc_macs = 0, i;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001707
Sathya Perlab7172412016-07-27 05:26:18 -04001708 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001709 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001710
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001711 if (netdev->flags & IFF_PROMISC) {
1712 adapter->update_uc_list = false;
1713 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1714 uc_promisc = true;
1715 adapter->update_uc_list = false;
1716 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1717 /* Update uc-list unconditionally if the iface was previously
1718 * in uc-promisc mode and now is out of that mode.
1719 */
1720 adapter->update_uc_list = true;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001721 }
1722
Sathya Perlab7172412016-07-27 05:26:18 -04001723 if (adapter->update_uc_list) {
Sathya Perlab7172412016-07-27 05:26:18 -04001724 /* cache the uc-list in adapter array */
Ivan Vecerae3a252a2017-01-06 21:59:30 +01001725 i = 0;
Sathya Perlab7172412016-07-27 05:26:18 -04001726 netdev_for_each_uc_addr(ha, netdev) {
1727 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1728 i++;
1729 }
1730 curr_uc_macs = netdev_uc_count(netdev);
1731 }
1732 netif_addr_unlock_bh(netdev);
1733
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001734 if (uc_promisc) {
1735 be_set_uc_promisc(adapter);
1736 } else if (adapter->update_uc_list) {
1737 be_clear_uc_promisc(adapter);
1738
Sathya Perlab7172412016-07-27 05:26:18 -04001739 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301740 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001741
Sathya Perlab7172412016-07-27 05:26:18 -04001742 for (i = 0; i < curr_uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301743 be_uc_mac_add(adapter, i);
Sathya Perlab7172412016-07-27 05:26:18 -04001744 adapter->uc_macs = curr_uc_macs;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001745 adapter->update_uc_list = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001746 }
1747}
1748
1749static void be_clear_uc_list(struct be_adapter *adapter)
1750{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001751 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001752 int i;
1753
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001754 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001755 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301756 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1757
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001758 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301759}
1760
Sathya Perlab7172412016-07-27 05:26:18 -04001761static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762{
Sathya Perlab7172412016-07-27 05:26:18 -04001763 struct net_device *netdev = adapter->netdev;
1764
1765 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001766
1767 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001768 if (!be_in_all_promisc(adapter))
1769 be_set_all_promisc(adapter);
1770 } else if (be_in_all_promisc(adapter)) {
1771 /* We need to re-program the vlan-list or clear
1772 * vlan-promisc mode (if needed) when the interface
1773 * comes out of promisc mode.
1774 */
1775 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001776 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001777
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001778 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001779 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001780
1781 mutex_unlock(&adapter->rx_filter_lock);
1782}
1783
1784static void be_work_set_rx_mode(struct work_struct *work)
1785{
1786 struct be_cmd_work *cmd_work =
1787 container_of(work, struct be_cmd_work, work);
1788
1789 __be_set_rx_mode(cmd_work->adapter);
1790 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001791}
1792
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001793static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1794{
1795 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001796 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001797 int status;
1798
Sathya Perla11ac75e2011-12-13 00:58:50 +00001799 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001800 return -EPERM;
1801
Sathya Perla11ac75e2011-12-13 00:58:50 +00001802 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001803 return -EINVAL;
1804
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301805 /* Proceed further only if user provided MAC is different
1806 * from active MAC
1807 */
1808 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1809 return 0;
1810
Sathya Perla3175d8c2013-07-23 15:25:03 +05301811 if (BEx_chip(adapter)) {
1812 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1813 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001814
Sathya Perla11ac75e2011-12-13 00:58:50 +00001815 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1816 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301817 } else {
1818 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1819 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001820 }
1821
Kalesh APabccf232014-07-17 16:20:24 +05301822 if (status) {
1823 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1824 mac, vf, status);
1825 return be_cmd_status(status);
1826 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001827
Kalesh APabccf232014-07-17 16:20:24 +05301828 ether_addr_copy(vf_cfg->mac_addr, mac);
1829
1830 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001831}
1832
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001833static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301834 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001835{
1836 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001837 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001838
Sathya Perla11ac75e2011-12-13 00:58:50 +00001839 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001840 return -EPERM;
1841
Sathya Perla11ac75e2011-12-13 00:58:50 +00001842 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001843 return -EINVAL;
1844
1845 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001846 vi->max_tx_rate = vf_cfg->tx_rate;
1847 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001848 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1849 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001850 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301851 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001852 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001853
1854 return 0;
1855}
1856
Vasundhara Volam435452a2015-03-20 06:28:23 -04001857static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1858{
1859 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1860 u16 vids[BE_NUM_VLANS_SUPPORTED];
1861 int vf_if_id = vf_cfg->if_handle;
1862 int status;
1863
1864 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001865 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001866 if (status)
1867 return status;
1868
1869 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1870 vids[0] = 0;
1871 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1872 if (!status)
1873 dev_info(&adapter->pdev->dev,
1874 "Cleared guest VLANs on VF%d", vf);
1875
1876 /* After TVT is enabled, disallow VFs to program VLAN filters */
1877 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1878 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1879 ~BE_PRIV_FILTMGMT, vf + 1);
1880 if (!status)
1881 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1882 }
1883 return 0;
1884}
1885
1886static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1887{
1888 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1889 struct device *dev = &adapter->pdev->dev;
1890 int status;
1891
1892 /* Reset Transparent VLAN Tagging. */
1893 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
Kalesh APe7bcbd72015-05-06 05:30:32 -04001894 vf_cfg->if_handle, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001895 if (status)
1896 return status;
1897
1898 /* Allow VFs to program VLAN filtering */
1899 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1900 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1901 BE_PRIV_FILTMGMT, vf + 1);
1902 if (!status) {
1903 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1904 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1905 }
1906 }
1907
1908 dev_info(dev,
1909 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1910 return 0;
1911}
1912
Moshe Shemesh79aab092016-09-22 12:11:15 +03001913static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1914 __be16 vlan_proto)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001915{
1916 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001917 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001918 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001919
Sathya Perla11ac75e2011-12-13 00:58:50 +00001920 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001921 return -EPERM;
1922
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001923 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001924 return -EINVAL;
1925
Moshe Shemesh79aab092016-09-22 12:11:15 +03001926 if (vlan_proto != htons(ETH_P_8021Q))
1927 return -EPROTONOSUPPORT;
1928
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001929 if (vlan || qos) {
1930 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001931 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001932 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001933 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001934 }
1935
Kalesh APabccf232014-07-17 16:20:24 +05301936 if (status) {
1937 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001938 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1939 status);
Kalesh APabccf232014-07-17 16:20:24 +05301940 return be_cmd_status(status);
1941 }
1942
1943 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301944 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001945}
1946
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001947static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1948 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001949{
1950 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301951 struct device *dev = &adapter->pdev->dev;
1952 int percent_rate, status = 0;
1953 u16 link_speed = 0;
1954 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001955
Sathya Perla11ac75e2011-12-13 00:58:50 +00001956 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001957 return -EPERM;
1958
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001959 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001960 return -EINVAL;
1961
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001962 if (min_tx_rate)
1963 return -EINVAL;
1964
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301965 if (!max_tx_rate)
1966 goto config_qos;
1967
1968 status = be_cmd_link_status_query(adapter, &link_speed,
1969 &link_status, 0);
1970 if (status)
1971 goto err;
1972
1973 if (!link_status) {
1974 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301975 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301976 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001977 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001978
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301979 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1980 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1981 link_speed);
1982 status = -EINVAL;
1983 goto err;
1984 }
1985
1986 /* On Skyhawk the QOS setting must be done only as a % value */
1987 percent_rate = link_speed / 100;
1988 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1989 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1990 percent_rate);
1991 status = -EINVAL;
1992 goto err;
1993 }
1994
1995config_qos:
1996 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001997 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301998 goto err;
1999
2000 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
2001 return 0;
2002
2003err:
2004 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
2005 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05302006 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002007}
Kalesh APe2fb1af2014-09-19 15:46:58 +05302008
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302009static int be_set_vf_link_state(struct net_device *netdev, int vf,
2010 int link_state)
2011{
2012 struct be_adapter *adapter = netdev_priv(netdev);
2013 int status;
2014
2015 if (!sriov_enabled(adapter))
2016 return -EPERM;
2017
2018 if (vf >= adapter->num_vfs)
2019 return -EINVAL;
2020
2021 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302022 if (status) {
2023 dev_err(&adapter->pdev->dev,
2024 "Link state change on VF %d failed: %#x\n", vf, status);
2025 return be_cmd_status(status);
2026 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302027
Kalesh APabccf232014-07-17 16:20:24 +05302028 adapter->vf_cfg[vf].plink_tracking = link_state;
2029
2030 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302031}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002032
Kalesh APe7bcbd72015-05-06 05:30:32 -04002033static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2034{
2035 struct be_adapter *adapter = netdev_priv(netdev);
2036 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2037 u8 spoofchk;
2038 int status;
2039
2040 if (!sriov_enabled(adapter))
2041 return -EPERM;
2042
2043 if (vf >= adapter->num_vfs)
2044 return -EINVAL;
2045
2046 if (BEx_chip(adapter))
2047 return -EOPNOTSUPP;
2048
2049 if (enable == vf_cfg->spoofchk)
2050 return 0;
2051
2052 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2053
2054 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2055 0, spoofchk);
2056 if (status) {
2057 dev_err(&adapter->pdev->dev,
2058 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2059 return be_cmd_status(status);
2060 }
2061
2062 vf_cfg->spoofchk = enable;
2063 return 0;
2064}
2065
Sathya Perla2632baf2013-10-01 16:00:00 +05302066static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2067 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002068{
Sathya Perla2632baf2013-10-01 16:00:00 +05302069 aic->rx_pkts_prev = rx_pkts;
2070 aic->tx_reqs_prev = tx_pkts;
2071 aic->jiffies = now;
2072}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002073
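/* Adaptive interrupt coalescing: derives a new event-queue delay for
 * this EQ from the rx+tx pkts/sec observed since the last sample,
 * clamped to the aic min/max bounds (or the static et_eqd value when
 * adaptive mode is off).
 */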
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002074static int be_get_new_eqd(struct be_eq_obj *eqo)
Sathya Perla2632baf2013-10-01 16:00:00 +05302075{
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002076 struct be_adapter *adapter = eqo->adapter;
2077 int eqd, start;
Sathya Perla2632baf2013-10-01 16:00:00 +05302078 struct be_aic_obj *aic;
Sathya Perla2632baf2013-10-01 16:00:00 +05302079 struct be_rx_obj *rxo;
2080 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002081 u64 rx_pkts = 0, tx_pkts = 0;
Sathya Perla2632baf2013-10-01 16:00:00 +05302082 ulong now;
2083 u32 pps, delta;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002084 int i;
2085
2086 aic = &adapter->aic_obj[eqo->idx];
2087 if (!aic->enable) {
2088 if (aic->jiffies)
2089 aic->jiffies = 0;
2090 eqd = aic->et_eqd;
2091 return eqd;
2092 }
2093
2094 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2095 do {
2096 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2097 rx_pkts += rxo->stats.rx_pkts;
2098 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
2099 }
2100
2101 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2102 do {
2103 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2104 tx_pkts += txo->stats.tx_reqs;
2105 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
2106 }
2107
2108	/* Skip if the counters wrapped around or this is the first calculation */
2109 now = jiffies;
2110 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2111 rx_pkts < aic->rx_pkts_prev ||
2112 tx_pkts < aic->tx_reqs_prev) {
2113 be_aic_update(aic, rx_pkts, tx_pkts, now);
2114 return aic->prev_eqd;
2115 }
2116
2117 delta = jiffies_to_msecs(now - aic->jiffies);
2118 if (delta == 0)
2119 return aic->prev_eqd;
2120
2121 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2122 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2123 eqd = (pps / 15000) << 2;
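	/* e.g., a combined rate of 150000 pkts/sec yields
	 * eqd = (150000 / 15000) << 2 = 40
	 */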
2124
2125 if (eqd < 8)
2126 eqd = 0;
2127 eqd = min_t(u32, eqd, aic->max_eqd);
2128 eqd = max_t(u32, eqd, aic->min_eqd);
2129
2130 be_aic_update(aic, rx_pkts, tx_pkts, now);
2131
2132 return eqd;
2133}
2134
2135/* For Skyhawk-R only */
2136static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2137{
2138 struct be_adapter *adapter = eqo->adapter;
2139 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2140 ulong now = jiffies;
2141 int eqd;
2142 u32 mult_enc;
2143
2144 if (!aic->enable)
2145 return 0;
2146
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302147 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002148 eqd = aic->prev_eqd;
2149 else
2150 eqd = be_get_new_eqd(eqo);
2151
2152 if (eqd > 100)
2153 mult_enc = R2I_DLY_ENC_1;
2154 else if (eqd > 60)
2155 mult_enc = R2I_DLY_ENC_2;
2156 else if (eqd > 20)
2157 mult_enc = R2I_DLY_ENC_3;
2158 else
2159 mult_enc = R2I_DLY_ENC_0;
2160
2161 aic->prev_eqd = eqd;
2162
2163 return mult_enc;
2164}
2165
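/* Recomputes the delay for every EQ and issues one FW command covering
 * all EQs whose delay changed (or all of them when force_update); the
 * delay is passed to FW as a multiplier: (eqd * 65) / 100.
 */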
2166void be_eqd_update(struct be_adapter *adapter, bool force_update)
2167{
2168 struct be_set_eqd set_eqd[MAX_EVT_QS];
2169 struct be_aic_obj *aic;
2170 struct be_eq_obj *eqo;
2171 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002172
Sathya Perla2632baf2013-10-01 16:00:00 +05302173 for_all_evt_queues(adapter, eqo, i) {
2174 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002175 eqd = be_get_new_eqd(eqo);
2176 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302177 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2178 set_eqd[num].eq_id = eqo->q.id;
2179 aic->prev_eqd = eqd;
2180 num++;
2181 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002182 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302183
2184 if (num)
2185 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002186}
2187
Sathya Perla3abcded2010-10-03 22:12:27 -07002188static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302189 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002190{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002191 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002192
Sathya Perlaab1594e2011-07-25 19:10:15 +00002193 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002194 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002195 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002196 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302197 if (rxcp->tunneled)
2198 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002199 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002200 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002201 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002202 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002203 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002204}
2205
Sathya Perla2e588f82011-03-11 02:49:26 +00002206static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002207{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002208	/* L4 checksum is not reliable for non-TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302209	 * Also ignore ipcksm for ipv6 pkts.
2210 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002211 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302212 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002213}
2214
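/* Returns the page_info for the frag at the RX queue tail: unmaps the
 * page on its last frag, otherwise only syncs the frag for CPU access,
 * and then consumes the queue entry.
 */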
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302215static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002217 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002218 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07002219 struct be_queue_info *rxq = &rxo->q;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302220 u32 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221
Sathya Perla3abcded2010-10-03 22:12:27 -07002222 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002223 BUG_ON(!rx_page_info->page);
2224
Sathya Perlae50287b2014-03-04 12:14:38 +05302225 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002226 dma_unmap_page(&adapter->pdev->dev,
2227 dma_unmap_addr(rx_page_info, bus),
2228 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05302229 rx_page_info->last_frag = false;
2230 } else {
2231 dma_sync_single_for_cpu(&adapter->pdev->dev,
2232 dma_unmap_addr(rx_page_info, bus),
2233 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00002234 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002235
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302236 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002237 atomic_dec(&rxq->used);
2238 return rx_page_info;
2239}
2240
2241/* Throw away the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002242static void be_rx_compl_discard(struct be_rx_obj *rxo,
2243 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002245 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002246 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002248 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302249 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002250 put_page(page_info->page);
2251 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002252 }
2253}
2254
2255/*
2256 * skb_fill_rx_data forms a complete skb for an ether frame
2257 * indicated by rxcp.
2258 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002259static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2260 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002261{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002262 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002263 u16 i, j;
2264 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265 u8 *start;
2266
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302267 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002268 start = page_address(page_info->page) + page_info->page_offset;
2269 prefetch(start);
2270
2271 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002272 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002274 skb->len = curr_frag_len;
2275 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002276 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277 /* Complete packet has now been moved to data */
2278 put_page(page_info->page);
2279 skb->data_len = 0;
2280 skb->tail += curr_frag_len;
2281 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002282 hdr_len = ETH_HLEN;
2283 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00002285 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286 skb_shinfo(skb)->frags[0].page_offset =
2287 page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05302288 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2289 curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002290 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002291 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002292 skb->tail += hdr_len;
2293 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00002294 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295
Sathya Perla2e588f82011-03-11 02:49:26 +00002296 if (rxcp->pkt_size <= rx_frag_size) {
2297 BUG_ON(rxcp->num_rcvd != 1);
2298 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002299 }
2300
2301 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002302 remaining = rxcp->pkt_size - curr_frag_len;
2303 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302304 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00002305 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002306
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002307 /* Coalesce all frags from the same physical page in one slot */
2308 if (page_info->page_offset == 0) {
2309 /* Fresh page */
2310 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002311 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002312 skb_shinfo(skb)->frags[j].page_offset =
2313 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002314 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002315 skb_shinfo(skb)->nr_frags++;
2316 } else {
2317 put_page(page_info->page);
2318 }
2319
Eric Dumazet9e903e02011-10-18 21:00:24 +00002320 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002321 skb->len += curr_frag_len;
2322 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002323 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00002324 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00002325 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002326 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002327 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002328}
2329
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002330/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05302331static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002332 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002333{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002334 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002335 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002336 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00002337
Eric Dumazetbb349bb2012-01-25 03:56:30 +00002338 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00002339 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002340 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002341 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002342 return;
2343 }
2344
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002345 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002346
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002347 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07002348 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00002349 else
2350 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002352 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002353 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002354 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08002355 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302356
Tom Herbertb6c0e892014-08-27 21:27:17 -07002357 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302358 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002359
Jiri Pirko343e43c2011-08-25 02:50:51 +00002360 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002361 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002362
2363 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002364}
2365
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002366/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09002367static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2368 struct napi_struct *napi,
2369 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002370{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002371 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002372 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002373 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00002374 u16 remaining, curr_frag_len;
2375 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00002376
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002377 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002378 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002379 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002380 return;
2381 }
2382
Sathya Perla2e588f82011-03-11 02:49:26 +00002383 remaining = rxcp->pkt_size;
2384 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302385 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002386
2387 curr_frag_len = min(remaining, rx_frag_size);
2388
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002389 /* Coalesce all frags from the same physical page in one slot */
2390 if (i == 0 || page_info->page_offset == 0) {
2391 /* First frag or Fresh page */
2392 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002393 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002394 skb_shinfo(skb)->frags[j].page_offset =
2395 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002396 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002397 } else {
2398 put_page(page_info->page);
2399 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00002400 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002401 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002402 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002403 memset(page_info, 0, sizeof(*page_info));
2404 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002405 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002406
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002407 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00002408 skb->len = rxcp->pkt_size;
2409 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002410 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002411 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00002412 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08002413 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302414
Tom Herbertb6c0e892014-08-27 21:27:17 -07002415 skb->csum_level = rxcp->tunneled;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002416
Jiri Pirko343e43c2011-08-25 02:50:51 +00002417 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002418 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002419
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002420 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002421}
2422
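/* Parse a v1 RX completion (used when the adapter runs in BE3-native mode)
 * into the HW-version-agnostic be_rx_compl_info
 */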
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002423static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2424 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002425{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302426 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2427 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2428 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2429 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2430 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2431 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2432 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2433 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2434 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2435 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2436 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002437 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302438 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2439 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002440 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302441 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302442	rxcp->tunneled = GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002444}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002445
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002446static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2447 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002448{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302449 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2450 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2451 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2452 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2453 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2454 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2455 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2456 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2457 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2458 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2459 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002460 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302461 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2462 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002463 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302464 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2465 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002466}
2467
2468static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2469{
2470 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2471 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2472 struct be_adapter *adapter = rxo->adapter;
2473
2474	/* For checking the valid bit it is OK to use either definition as the
2475	 * valid bit is at the same position in both v0 and v1 Rx compl
	 */
2476 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002477 return NULL;
2478
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002479 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00002480 be_dws_le_to_cpu(compl, sizeof(*compl));
2481
2482 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002483 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002484 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002485 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002486
Somnath Koture38b1702013-05-29 22:55:56 +00002487 if (rxcp->ip_frag)
2488 rxcp->l4_csum = 0;
2489
Sathya Perla15d72182011-03-21 20:49:26 +00002490 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05302491 /* In QNQ modes, if qnq bit is not set, then the packet was
2492 * tagged only with the transparent outer vlan-tag and must
2493		 * not be treated as a vlan packet by the host
2494 */
2495 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
Sathya Perla15d72182011-03-21 20:49:26 +00002496 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00002497
Sathya Perla15d72182011-03-21 20:49:26 +00002498 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04002499 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00002500
Somnath Kotur939cf302011-08-18 21:51:49 -07002501 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05302502 !test_bit(rxcp->vlan_tag, adapter->vids))
Sathya Perla15d72182011-03-21 20:49:26 +00002503 rxcp->vlanf = 0;
2504 }
Sathya Perla2e588f82011-03-11 02:49:26 +00002505
2506	/* As the compl has been parsed, reset it; we won't touch it again */
2507 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002508
Sathya Perla3abcded2010-10-03 22:12:27 -07002509 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002510 return rxcp;
2511}
2512
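/* Request a compound page (__GFP_COMP) for order > 0 allocations so that
 * the per-frag get_page()/put_page() refcounting in this driver is safe
 */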
Eric Dumazet1829b082011-03-01 05:48:12 +00002513static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002514{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002515 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002516
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002517 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002518 gfp |= __GFP_COMP;
2519 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002520}
2521
2522/*
2523 * Allocate a page, split it into fragments of size rx_frag_size and post as
2524 * receive buffers to BE
2525 */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302526static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002527{
Sathya Perla3abcded2010-10-03 22:12:27 -07002528 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08002529 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07002530 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002531 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002532 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002533 struct be_eth_rx_d *rxd;
2534 u64 page_dmaaddr = 0, frag_dmaaddr;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302535 u32 posted, page_offset = 0, notify = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002536
Sathya Perla3abcded2010-10-03 22:12:27 -07002537 page_info = &rxo->page_info_tbl[rxq->head];
Ajit Khapardec30d7262014-09-12 17:39:16 +05302538 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002539 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002540 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002541 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002542 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002543 break;
2544 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01002545 page_dmaaddr = dma_map_page(dev, pagep, 0,
2546 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002547 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01002548 if (dma_mapping_error(dev, page_dmaaddr)) {
2549 put_page(pagep);
2550 pagep = NULL;
Vasundhara Volamd3de1542014-09-02 09:56:50 +05302551 adapter->drv_stats.dma_map_errors++;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002552 break;
2553 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302554 page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002555 } else {
2556 get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05302557 page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002558 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302559 page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002560 page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002561
2562 rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05302563 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002564 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2565 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002566
2567 /* Any space left in the current big page for another frag? */
2568 if ((page_offset + rx_frag_size + rx_frag_size) >
2569 adapter->big_page_size) {
2570 pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05302571 page_info->last_frag = true;
2572 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2573 } else {
2574 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002575 }
Sathya Perla26d92f92010-01-21 22:52:08 -08002576
2577 prev_page_info = page_info;
2578 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002579 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002580 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302581
2582 /* Mark the last frag of a page when we break out of the above loop
2583 * with no more slots available in the RXQ
2584 */
2585 if (pagep) {
2586 prev_page_info->last_frag = true;
2587 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2588 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002589
2590 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302592 if (rxo->rx_post_starved)
2593 rxo->rx_post_starved = false;
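		/* The ERX doorbell can be rung for at most
		 * MAX_NUM_POST_ERX_DB buffers at a time, so notify in batches
		 */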
Ajit Khapardec30d7262014-09-12 17:39:16 +05302594 do {
Ajit Khaparde69304cc2015-04-08 16:59:48 -05002595 notify = min(MAX_NUM_POST_ERX_DB, posted);
Ajit Khapardec30d7262014-09-12 17:39:16 +05302596 be_rxq_notify(adapter, rxq->id, notify);
2597 posted -= notify;
2598 } while (posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07002599 } else if (atomic_read(&rxq->used) == 0) {
2600 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07002601 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002602 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002603}
2604
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302605static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002606{
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302607 struct be_queue_info *tx_cq = &txo->cq;
2608 struct be_tx_compl_info *txcp = &txo->txcp;
2609 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002610
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302611 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002612 return NULL;
2613
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302614 /* Ensure load ordering of valid bit dword and other dwords below */
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002615 rmb();
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302616 be_dws_le_to_cpu(compl, sizeof(*compl));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002617
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302618 txcp->status = GET_TX_COMPL_BITS(status, compl);
2619 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002620
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302621 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002622 queue_tail_inc(tx_cq);
2623 return txcp;
2624}
2625
Sathya Perla3c8def92011-06-12 20:01:58 +00002626static u16 be_tx_compl_process(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302627 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002628{
Sathya Perla3c8def92011-06-12 20:01:58 +00002629 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002630 struct be_queue_info *txq = &txo->q;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002631 struct sk_buff *skb = NULL;
2632 bool unmap_skb_hdr = false;
2633 struct be_eth_wrb *wrb;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302634 u16 num_wrbs = 0;
2635 u32 frag_index;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002636
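	/* Walk the TXQ from its tail up to last_index, unmapping each wrb
	 * and freeing the skb(s) of the completed request(s)
	 */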
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002637 do {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002638 if (sent_skbs[txq->tail]) {
2639 /* Free skb from prev req */
2640 if (skb)
2641 dev_consume_skb_any(skb);
2642 skb = sent_skbs[txq->tail];
2643 sent_skbs[txq->tail] = NULL;
2644 queue_tail_inc(txq); /* skip hdr wrb */
2645 num_wrbs++;
2646 unmap_skb_hdr = true;
2647 }
Alexander Duycka73b7962009-12-02 16:48:18 +00002648 wrb = queue_tail_node(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002649 frag_index = txq->tail;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002650 unmap_tx_frag(&adapter->pdev->dev, wrb,
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002651 (unmap_skb_hdr && skb_headlen(skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002652 unmap_skb_hdr = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002653 queue_tail_inc(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002654 num_wrbs++;
2655 } while (frag_index != last_index);
2656 dev_consume_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002657
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002658 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002659}
2660
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002661/* Return the number of events in the event queue */
2662static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002663{
2664 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002665 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002666
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002667 do {
2668 eqe = queue_tail_node(&eqo->q);
2669 if (eqe->evt == 0)
2670 break;
2671
2672 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002673 eqe->evt = 0;
2674 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002675 queue_tail_inc(&eqo->q);
2676 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002677
2678 return num;
2679}
2680
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002681/* Leaves the EQ in a disarmed state */
2682static void be_eq_clean(struct be_eq_obj *eqo)
2683{
2684 int num = events_get(eqo);
2685
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002686 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002687}
2688
Kalesh AP99b44302015-08-05 03:27:49 -04002689/* Free posted rx buffers that were not used */
2690static void be_rxq_clean(struct be_rx_obj *rxo)
2691{
2692 struct be_queue_info *rxq = &rxo->q;
2693 struct be_rx_page_info *page_info;
2694
2695 while (atomic_read(&rxq->used) > 0) {
2696 page_info = get_rx_page_info(rxo);
2697 put_page(page_info->page);
2698 memset(page_info, 0, sizeof(*page_info));
2699 }
2700 BUG_ON(atomic_read(&rxq->used));
2701 rxq->tail = 0;
2702 rxq->head = 0;
2703}
2704
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002705static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002706{
Sathya Perla3abcded2010-10-03 22:12:27 -07002707 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002708 struct be_rx_compl_info *rxcp;
Sathya Perlad23e9462012-12-17 19:38:51 +00002709 struct be_adapter *adapter = rxo->adapter;
2710 int flush_wait = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002711
Sathya Perlad23e9462012-12-17 19:38:51 +00002712 /* Consume pending rx completions.
2713 * Wait for the flush completion (identified by zero num_rcvd)
2714 * to arrive. Notify CQ even when there are no more CQ entries
2715 * for HW to flush partially coalesced CQ entries.
2716 * In Lancer, there is no need to wait for flush compl.
2717 */
2718 for (;;) {
2719 rxcp = be_rx_compl_get(rxo);
Kalesh APddf11692014-07-17 16:20:28 +05302720 if (!rxcp) {
Sathya Perlad23e9462012-12-17 19:38:51 +00002721 if (lancer_chip(adapter))
2722 break;
2723
Venkata Duvvuru954f6822015-05-13 13:00:13 +05302724 if (flush_wait++ > 50 ||
2725 be_check_error(adapter,
2726 BE_ERROR_HW)) {
Sathya Perlad23e9462012-12-17 19:38:51 +00002727 dev_warn(&adapter->pdev->dev,
2728 "did not receive flush compl\n");
2729 break;
2730 }
2731 be_cq_notify(adapter, rx_cq->id, true, 0);
2732 mdelay(1);
2733 } else {
2734 be_rx_compl_discard(rxo, rxcp);
Sathya Perla3f5dffe2013-05-08 02:05:49 +00002735 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perlad23e9462012-12-17 19:38:51 +00002736 if (rxcp->num_rcvd == 0)
2737 break;
2738 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002739 }
2740
Sathya Perlad23e9462012-12-17 19:38:51 +00002741 /* After cleanup, leave the CQ in unarmed state */
2742 be_cq_notify(adapter, rx_cq->id, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002743}
2744
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002745static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002746{
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002747 struct device *dev = &adapter->pdev->dev;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302748 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302749 struct be_tx_compl_info *txcp;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002750 struct be_queue_info *txq;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302751 u32 end_idx, notified_idx;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302752 struct be_tx_obj *txo;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002753 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002754
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302755 /* Stop polling for compls when HW has been silent for 10ms */
Sathya Perlaa8e91792009-08-10 03:42:43 +00002756 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002757 pending_txqs = adapter->num_tx_qs;
2758
2759 for_all_tx_queues(adapter, txo, i) {
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302760 cmpl = 0;
2761 num_wrbs = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002762 txq = &txo->q;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302763 while ((txcp = be_tx_compl_get(txo))) {
2764 num_wrbs +=
2765 be_tx_compl_process(adapter, txo,
2766 txcp->end_index);
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002767 cmpl++;
2768 }
2769 if (cmpl) {
2770 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2771 atomic_sub(num_wrbs, &txq->used);
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302772 timeo = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002773 }
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302774 if (!be_is_tx_compl_pending(txo))
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002775 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002776 }
2777
Venkata Duvvuru954f6822015-05-13 13:00:13 +05302778 if (pending_txqs == 0 || ++timeo > 10 ||
2779 be_check_error(adapter, BE_ERROR_HW))
Sathya Perlaa8e91792009-08-10 03:42:43 +00002780 break;
2781
2782 mdelay(1);
2783 } while (true);
2784
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002785 /* Free enqueued TX that was never notified to HW */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002786 for_all_tx_queues(adapter, txo, i) {
2787 txq = &txo->q;
Sathya Perlab03388d2010-02-18 00:37:17 +00002788
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002789 if (atomic_read(&txq->used)) {
2790 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2791 i, atomic_read(&txq->used));
2792 notified_idx = txq->tail;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002793 end_idx = txq->tail;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002794 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2795 txq->len);
2796 /* Use the tx-compl process logic to handle requests
2797 * that were not sent to the HW.
2798 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002799 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2800 atomic_sub(num_wrbs, &txq->used);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002801 BUG_ON(atomic_read(&txq->used));
2802 txo->pend_wrb_cnt = 0;
2803 /* Since hw was never notified of these requests,
2804 * reset TXQ indices
2805 */
2806 txq->head = notified_idx;
2807 txq->tail = notified_idx;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002808 }
Sathya Perlab03388d2010-02-18 00:37:17 +00002809 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002810}
2811
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002812static void be_evt_queues_destroy(struct be_adapter *adapter)
2813{
2814 struct be_eq_obj *eqo;
2815 int i;
2816
2817 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002818 if (eqo->q.created) {
2819 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002820 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302821 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002822 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002823 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002824 be_queue_free(adapter, &eqo->q);
2825 }
2826}
2827
2828static int be_evt_queues_create(struct be_adapter *adapter)
2829{
2830 struct be_queue_info *eq;
2831 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302832 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002833 int i, rc;
2834
Sathya Perlae2617682016-06-22 08:54:54 -04002835 /* need enough EQs to service both RX and TX queues */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302836 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
Sathya Perlae2617682016-06-22 08:54:54 -04002837 max(adapter->cfg_num_rx_irqs,
2838 adapter->cfg_num_tx_irqs));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002839
2840 for_all_evt_queues(adapter, eqo, i) {
Rusty Russellf36963c2015-05-09 03:14:13 +09302841 int numa_node = dev_to_node(&adapter->pdev->dev);
Kalesh AP649886a2015-08-05 03:27:50 -04002842
Sathya Perla2632baf2013-10-01 16:00:00 +05302843 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002844 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002845 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302846 aic->max_eqd = BE_MAX_EQD;
2847 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002848
2849 eq = &eqo->q;
2850 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302851 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002852 if (rc)
2853 return rc;
2854
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302855 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002856 if (rc)
2857 return rc;
Kalesh AP649886a2015-08-05 03:27:50 -04002858
2859 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2860 return -ENOMEM;
2861 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2862 eqo->affinity_mask);
2863 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2864 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002865 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002866 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002867}
2868
Sathya Perla5fb379e2009-06-18 00:02:59 +00002869static void be_mcc_queues_destroy(struct be_adapter *adapter)
2870{
2871 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002872
Sathya Perla8788fdc2009-07-27 22:52:03 +00002873 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002874 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002875 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002876 be_queue_free(adapter, q);
2877
Sathya Perla8788fdc2009-07-27 22:52:03 +00002878 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002879 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002880 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002881 be_queue_free(adapter, q);
2882}
2883
2884/* Must be called only after TX qs are created as MCC shares TX EQ */
2885static int be_mcc_queues_create(struct be_adapter *adapter)
2886{
2887 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002888
Sathya Perla8788fdc2009-07-27 22:52:03 +00002889 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002890 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302891 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002892 goto err;
2893
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002894 /* Use the default EQ for MCC completions */
2895 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002896 goto mcc_cq_free;
2897
Sathya Perla8788fdc2009-07-27 22:52:03 +00002898 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002899 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2900 goto mcc_cq_destroy;
2901
Sathya Perla8788fdc2009-07-27 22:52:03 +00002902 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002903 goto mcc_q_free;
2904
2905 return 0;
2906
2907mcc_q_free:
2908 be_queue_free(adapter, q);
2909mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002910 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002911mcc_cq_free:
2912 be_queue_free(adapter, cq);
2913err:
2914 return -1;
2915}
2916
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002917static void be_tx_queues_destroy(struct be_adapter *adapter)
2918{
2919 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002920 struct be_tx_obj *txo;
2921 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002922
Sathya Perla3c8def92011-06-12 20:01:58 +00002923 for_all_tx_queues(adapter, txo, i) {
2924 q = &txo->q;
2925 if (q->created)
2926 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2927 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002928
Sathya Perla3c8def92011-06-12 20:01:58 +00002929 q = &txo->cq;
2930 if (q->created)
2931 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2932 be_queue_free(adapter, q);
2933 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002934}
2935
Sathya Perla77071332013-08-27 16:57:34 +05302936static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002937{
Sathya Perla73f394e2015-03-26 03:05:09 -04002938 struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002939 struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002940 struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302941 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002942
Sathya Perlae2617682016-06-22 08:54:54 -04002943 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
Sathya Perladafc0fe2011-10-24 02:45:02 +00002944
Sathya Perla3c8def92011-06-12 20:01:58 +00002945 for_all_tx_queues(adapter, txo, i) {
2946 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002947 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2948 sizeof(struct be_eth_tx_compl));
2949 if (status)
2950 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002951
John Stultz827da442013-10-07 15:51:58 -07002952 u64_stats_init(&txo->stats.sync);
2953 u64_stats_init(&txo->stats.sync_compl);
2954
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002955 /* If num_evt_qs is less than num_tx_qs, then more than
2956 * one txq share an eq
2957 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002958 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2959 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002960 if (status)
2961 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002962
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002963 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2964 sizeof(struct be_eth_wrb));
2965 if (status)
2966 return status;
2967
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002968 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002969 if (status)
2970 return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002971
2972 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2973 eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002974 }
2975
Sathya Perlad3791422012-09-28 04:39:44 +00002976 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2977 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002978 return 0;
2979}
2980
2981static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002982{
2983 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002984 struct be_rx_obj *rxo;
2985 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002986
Sathya Perla3abcded2010-10-03 22:12:27 -07002987 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002988 q = &rxo->cq;
2989 if (q->created)
2990 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2991 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002992 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002993}
2994
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002995static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002996{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002997 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002998 struct be_rx_obj *rxo;
2999 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003000
Sathya Perlae2617682016-06-22 08:54:54 -04003001 adapter->num_rss_qs =
3002 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303003
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003004	/* We'll use RSS only if at least 2 RSS rings are supported. */
Sathya Perlae2617682016-06-22 08:54:54 -04003005 if (adapter->num_rss_qs < 2)
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003006 adapter->num_rss_qs = 0;
3007
3008 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
3009
3010 /* When the interface is not capable of RSS rings (and there is no
3011 * need to create a default RXQ) we'll still need one RXQ
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003012 */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003013 if (adapter->num_rx_qs == 0)
3014 adapter->num_rx_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303015
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003016 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07003017 for_all_rx_queues(adapter, rxo, i) {
3018 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07003019 cq = &rxo->cq;
3020 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05303021 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07003022 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003023 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003024
John Stultz827da442013-10-07 15:51:58 -07003025 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003026 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3027 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07003028 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003029 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07003030 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003031
Sathya Perlad3791422012-09-28 04:39:44 +00003032 dev_info(&adapter->pdev->dev,
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003033 "created %d RX queue(s)\n", adapter->num_rx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003034 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00003035}
3036
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003037static irqreturn_t be_intx(int irq, void *dev)
3038{
Sathya Perlae49cc342012-11-27 19:50:02 +00003039 struct be_eq_obj *eqo = dev;
3040 struct be_adapter *adapter = eqo->adapter;
3041 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003042
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003043 /* IRQ is not expected when NAPI is scheduled as the EQ
3044 * will not be armed.
3045 * But, this can happen on Lancer INTx where it takes
3046	 * a while to de-assert INTx or in BE2 where occasionally
3047 * an interrupt may be raised even when EQ is unarmed.
3048 * If NAPI is already scheduled, then counting & notifying
3049 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00003050 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003051 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003052 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003053 __napi_schedule(&eqo->napi);
3054 if (num_evts)
3055 eqo->spurious_intr = 0;
3056 }
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003057 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003058
3059	/* Return IRQ_HANDLED only for the first spurious intr
3060 * after a valid intr to stop the kernel from branding
3061 * this irq as a bad one!
3062 */
3063 if (num_evts || eqo->spurious_intr++ == 0)
3064 return IRQ_HANDLED;
3065 else
3066 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003067}
3068
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003069static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003070{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003071 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003072
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003073 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00003074 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003075 return IRQ_HANDLED;
3076}
3077
Sathya Perla2e588f82011-03-11 02:49:26 +00003078static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003079{
Somnath Koture38b1702013-05-29 22:55:56 +00003080 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003081}
3082
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003083static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla748b5392014-05-09 13:29:13 +05303084 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003085{
Sathya Perla3abcded2010-10-03 22:12:27 -07003086 struct be_adapter *adapter = rxo->adapter;
3087 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00003088 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003089 u32 work_done;
Ajit Khapardec30d7262014-09-12 17:39:16 +05303090 u32 frags_consumed = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003091
3092 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07003093 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003094 if (!rxcp)
3095 break;
3096
Sathya Perla12004ae2011-08-02 19:57:46 +00003097 /* Is it a flush compl that has no data */
3098 if (unlikely(rxcp->num_rcvd == 0))
3099 goto loop_continue;
3100
3101 /* Discard compl with partial DMA Lancer B0 */
3102 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003103 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003104 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00003105 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00003106
Sathya Perla12004ae2011-08-02 19:57:46 +00003107 /* On BE drop pkts that arrive due to imperfect filtering in
3108		 * promiscuous mode on some SKUs
3109 */
3110 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05303111 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003112 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003113 goto loop_continue;
3114 }
3115
Sathya Perla6384a4d2013-10-25 10:40:16 +05303116		/* Don't do GRO when we're busy_polling */
3117 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003118 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003119 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05303120 be_rx_compl_process(rxo, napi, rxcp);
3121
Sathya Perla12004ae2011-08-02 19:57:46 +00003122loop_continue:
Ajit Khapardec30d7262014-09-12 17:39:16 +05303123 frags_consumed += rxcp->num_rcvd;
Sathya Perla2e588f82011-03-11 02:49:26 +00003124 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003125 }
3126
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003127 if (work_done) {
3128 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00003129
Sathya Perla6384a4d2013-10-25 10:40:16 +05303130 /* When an rx-obj gets into post_starved state, just
3131 * let be_worker do the posting.
3132 */
3133 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3134 !rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05303135 be_post_rx_frags(rxo, GFP_ATOMIC,
3136 max_t(u32, MAX_RX_POST,
3137 frags_consumed));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003138 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003139
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003140 return work_done;
3141}
3142
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303143static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303144{
3145 switch (status) {
3146 case BE_TX_COMP_HDR_PARSE_ERR:
3147 tx_stats(txo)->tx_hdr_parse_err++;
3148 break;
3149 case BE_TX_COMP_NDMA_ERR:
3150 tx_stats(txo)->tx_dma_err++;
3151 break;
3152 case BE_TX_COMP_ACL_ERR:
3153 tx_stats(txo)->tx_spoof_check_err++;
3154 break;
3155 }
3156}
3157
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303158static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303159{
3160 switch (status) {
3161 case LANCER_TX_COMP_LSO_ERR:
3162 tx_stats(txo)->tx_tso_err++;
3163 break;
3164 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3165 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3166 tx_stats(txo)->tx_spoof_check_err++;
3167 break;
3168 case LANCER_TX_COMP_QINQ_ERR:
3169 tx_stats(txo)->tx_qinq_err++;
3170 break;
3171 case LANCER_TX_COMP_PARITY_ERR:
3172 tx_stats(txo)->tx_internal_parity_err++;
3173 break;
3174 case LANCER_TX_COMP_DMA_ERR:
3175 tx_stats(txo)->tx_dma_err++;
3176 break;
3177 }
3178}
3179
Sathya Perlac8f64612014-09-02 09:56:55 +05303180static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3181 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003182{
Sathya Perlac8f64612014-09-02 09:56:55 +05303183 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303184 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003185
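	/* Reap all available TX completions; each compl carries the index
	 * of the last wrb of the completed request
	 */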
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303186 while ((txcp = be_tx_compl_get(txo))) {
3187 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05303188 work_done++;
3189
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303190 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05303191 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303192 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05303193 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303194 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05303195 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003196 }
3197
3198 if (work_done) {
3199 be_cq_notify(adapter, txo->cq.id, true, work_done);
3200 atomic_sub(num_wrbs, &txo->q.used);
3201
3202		/* As Tx wrbs have been freed up, wake up netdev queue
3203		 * if it was stopped due to lack of tx wrbs.
		 */
3204 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05303205 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003206 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00003207 }
Sathya Perla3c8def92011-06-12 20:01:58 +00003208
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003209 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3210 tx_stats(txo)->tx_compl += work_done;
3211 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3212 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003213}
Sathya Perla3c8def92011-06-12 20:01:58 +00003214
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003215#ifdef CONFIG_NET_RX_BUSY_POLL
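/* eqo->state arbitrates an EQ between the NAPI and busy-poll contexts:
 * the context that finds the EQ already locked records a *_YIELD flag
 * and backs off rather than processing the queues concurrently
 */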
3216static inline bool be_lock_napi(struct be_eq_obj *eqo)
3217{
3218 bool status = true;
3219
3220 spin_lock(&eqo->lock); /* BH is already disabled */
3221 if (eqo->state & BE_EQ_LOCKED) {
3222 WARN_ON(eqo->state & BE_EQ_NAPI);
3223 eqo->state |= BE_EQ_NAPI_YIELD;
3224 status = false;
3225 } else {
3226 eqo->state = BE_EQ_NAPI;
3227 }
3228 spin_unlock(&eqo->lock);
3229 return status;
3230}
3231
3232static inline void be_unlock_napi(struct be_eq_obj *eqo)
3233{
3234 spin_lock(&eqo->lock); /* BH is already disabled */
3235
3236 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3237 eqo->state = BE_EQ_IDLE;
3238
3239 spin_unlock(&eqo->lock);
3240}
3241
3242static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3243{
3244 bool status = true;
3245
3246 spin_lock_bh(&eqo->lock);
3247 if (eqo->state & BE_EQ_LOCKED) {
3248 eqo->state |= BE_EQ_POLL_YIELD;
3249 status = false;
3250 } else {
3251 eqo->state |= BE_EQ_POLL;
3252 }
3253 spin_unlock_bh(&eqo->lock);
3254 return status;
3255}
3256
3257static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3258{
3259 spin_lock_bh(&eqo->lock);
3260
3261 WARN_ON(eqo->state & (BE_EQ_NAPI));
3262 eqo->state = BE_EQ_IDLE;
3263
3264 spin_unlock_bh(&eqo->lock);
3265}
3266
3267static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3268{
3269 spin_lock_init(&eqo->lock);
3270 eqo->state = BE_EQ_IDLE;
3271}
3272
3273static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3274{
3275 local_bh_disable();
3276
3277 /* It's enough to just acquire napi lock on the eqo to stop
3278	 * be_busy_poll() from processing any queues.
3279 */
3280 while (!be_lock_napi(eqo))
3281 mdelay(1);
3282
3283 local_bh_enable();
3284}
3285
3286#else /* CONFIG_NET_RX_BUSY_POLL */
3287
3288static inline bool be_lock_napi(struct be_eq_obj *eqo)
3289{
3290 return true;
3291}
3292
3293static inline void be_unlock_napi(struct be_eq_obj *eqo)
3294{
3295}
3296
3297static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3298{
3299 return false;
3300}
3301
3302static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3303{
3304}
3305
3306static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3307{
3308}
3309
3310static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3311{
3312}
3313#endif /* CONFIG_NET_RX_BUSY_POLL */
3314
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303315int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003316{
3317 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3318 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00003319 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05303320 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303321 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003322 u32 mult_enc = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00003323
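	/* Consume and count events now; the count is acked back to HW in
	 * the be_eq_notify() calls below
	 */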
Sathya Perla0b545a62012-11-23 00:27:18 +00003324 num_evts = events_get(eqo);
3325
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303326 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3327 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003328
Sathya Perla6384a4d2013-10-25 10:40:16 +05303329 if (be_lock_napi(eqo)) {
3330 /* This loop will iterate twice for EQ0 in which
3331		 * completions of the last RXQ (default one) are also processed.
3332 * For other EQs the loop iterates only once
3333 */
3334 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3335 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3336 max_work = max(work, max_work);
3337 }
3338 be_unlock_napi(eqo);
3339 } else {
3340 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08003341 }
3342
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003343 if (is_mcc_eqo(eqo))
3344 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003345
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003346 if (max_work < budget) {
3347 napi_complete(napi);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003348
3349		/* Skyhawk EQ_DB has a provision to set the rearm-to-interrupt
3350 * delay via a delay multiplier encoding value
3351 */
3352 if (skyhawk_chip(adapter))
3353 mult_enc = be_get_eq_delay_mult_enc(eqo);
3354
3355 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3356 mult_enc);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003357 } else {
3358 /* As we'll continue in polling mode, count and clear events */
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003359 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00003360 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003361 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003362}
3363
Sathya Perla6384a4d2013-10-25 10:40:16 +05303364#ifdef CONFIG_NET_RX_BUSY_POLL
3365static int be_busy_poll(struct napi_struct *napi)
3366{
3367 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3368 struct be_adapter *adapter = eqo->adapter;
3369 struct be_rx_obj *rxo;
3370 int i, work = 0;
3371
3372 if (!be_lock_busy_poll(eqo))
3373 return LL_FLUSH_BUSY;
3374
3375 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3376 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3377 if (work)
3378 break;
3379 }
3380
3381 be_unlock_busy_poll(eqo);
3382 return work;
3383}
3384#endif
3385
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003386void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003387{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003388 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3389 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003390 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303391 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003392
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303393 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003394 return;
3395
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003396 if (lancer_chip(adapter)) {
3397 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3398 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303399 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003400 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303401 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003402 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303403 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303404			/* Do not log error messages if it's a FW reset */
3405 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3406 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3407 dev_info(dev, "Firmware update in progress\n");
3408 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303409 dev_err(dev, "Error detected in the card\n");
3410 dev_err(dev, "ERR: sliport status 0x%x\n",
3411 sliport_status);
3412 dev_err(dev, "ERR: sliport error1 0x%x\n",
3413 sliport_err1);
3414 dev_err(dev, "ERR: sliport error2 0x%x\n",
3415 sliport_err2);
3416 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003417 }
3418 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003419 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3420 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3421 ue_lo_mask = ioread32(adapter->pcicfg +
3422 PCICFG_UE_STATUS_LOW_MASK);
3423 ue_hi_mask = ioread32(adapter->pcicfg +
3424 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003425
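		/* Bits set in the UE status mask registers identify UE
		 * sources that must be ignored
		 */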
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003426 ue_lo = (ue_lo & ~ue_lo_mask);
3427 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003428
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303429		/* On certain platforms BE hardware can indicate spurious UEs.
3430		 * On a real UE the HW will stop working on its own, so we do
3431		 * not set hw_error merely on UE detection.
3432 */
3433
3434 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303435			dev_err(dev, "Error detected in the adapter\n");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303436 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303437 be_set_error(adapter, BE_ERROR_UE);
3438
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303439 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3440 if (ue_lo & 1)
3441 dev_err(dev, "UE: %s bit set\n",
3442 ue_status_low_desc[i]);
3443 }
3444 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3445 if (ue_hi & 1)
3446 dev_err(dev, "UE: %s bit set\n",
3447 ue_status_hi_desc[i]);
3448 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303449 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003450 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003451}
3452
Sathya Perla8d56ff12009-11-22 22:02:26 +00003453static void be_msix_disable(struct be_adapter *adapter)
3454{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003455 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003456 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003457 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303458 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003459 }
3460}
3461
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003462static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003463{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003464 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003465 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003466 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003467
Sathya Perlace7faf02016-06-22 08:54:53 -04003468 /* If RoCE is supported, program the max number of vectors that
3469	 * could be used for NIC and RoCE; otherwise, program just the number
3470 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303471 */
Sathya Perlae2617682016-06-22 08:54:54 -04003472 if (be_roce_supported(adapter)) {
3473 max_roce_eqs =
3474 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3475 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3476 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3477 } else {
3478 num_vec = max(adapter->cfg_num_rx_irqs,
3479 adapter->cfg_num_tx_irqs);
3480 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003481
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003482 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003483 adapter->msix_entries[i].entry = i;
3484
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003485 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3486 MIN_MSIX_VECTORS, num_vec);
3487 if (num_vec < 0)
3488 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003489
Sathya Perla92bf14a2013-08-27 16:57:32 +05303490 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3491 adapter->num_msix_roce_vec = num_vec / 2;
3492 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3493 adapter->num_msix_roce_vec);
3494 }
3495
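	/* The NIC keeps whatever MSI-x vectors remain after RoCE's share */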
3496 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3497
3498 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3499 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003500 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003501
3502fail:
3503 dev_warn(dev, "MSIx enable failed\n");
3504
3505 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003506 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003507 return num_vec;
3508 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003509}
3510
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003511static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303512 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003513{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303514 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003515}
3516
3517static int be_msix_register(struct be_adapter *adapter)
3518{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003519 struct net_device *netdev = adapter->netdev;
3520 struct be_eq_obj *eqo;
3521 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003522
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003523 for_all_evt_queues(adapter, eqo, i) {
3524 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3525 vec = be_msix_vec_get(adapter, eqo);
3526 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003527 if (status)
3528 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003529
3530 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003531 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003532
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003533 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003534err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303535 for (i--; i >= 0; i--) {
3536 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003537 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303538 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003539 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303540 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003541 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003542 return status;
3543}
3544
3545static int be_irq_register(struct be_adapter *adapter)
3546{
3547 struct net_device *netdev = adapter->netdev;
3548 int status;
3549
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003550 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003551 status = be_msix_register(adapter);
3552 if (status == 0)
3553 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003554 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003555 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003556 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003557 }
3558
Sathya Perlae49cc342012-11-27 19:50:02 +00003559 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003560 netdev->irq = adapter->pdev->irq;
3561 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003562 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003563 if (status) {
3564 dev_err(&adapter->pdev->dev,
3565 "INTx request IRQ failed - err %d\n", status);
3566 return status;
3567 }
3568done:
3569 adapter->isr_registered = true;
3570 return 0;
3571}
3572
3573static void be_irq_unregister(struct be_adapter *adapter)
3574{
3575 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003576 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003577 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003578
3579 if (!adapter->isr_registered)
3580 return;
3581
3582 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003583 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003584 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003585 goto done;
3586 }
3587
3588 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003589 for_all_evt_queues(adapter, eqo, i) {
3590 vec = be_msix_vec_get(adapter, eqo);
3591 irq_set_affinity_hint(vec, NULL);
3592 free_irq(vec, eqo);
3593 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003594
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003595done:
3596 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003597}
3598
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003599static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003600{
Ajit Khaparde62219062016-02-10 22:45:53 +05303601 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00003602 struct be_queue_info *q;
3603 struct be_rx_obj *rxo;
3604 int i;
3605
3606 for_all_rx_queues(adapter, rxo, i) {
3607 q = &rxo->q;
3608 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003609 /* If RXQs are destroyed while in an "out of buffer"
3610 * state, there is a possibility of an HW stall on
3611 * Lancer. So, post 64 buffers to each queue to relieve
3612 * the "out of buffer" condition.
3613 * Make sure there's space in the RXQ before posting.
3614 */
3615 if (lancer_chip(adapter)) {
3616 be_rx_cq_clean(rxo);
3617 if (atomic_read(&q->used) == 0)
3618 be_post_rx_frags(rxo, GFP_KERNEL,
3619 MAX_RX_POST);
3620 }
3621
Sathya Perla482c9e72011-06-29 23:33:17 +00003622 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003623 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003624 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003625 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003626 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003627 }
Ajit Khaparde62219062016-02-10 22:45:53 +05303628
3629 if (rss->rss_flags) {
3630 rss->rss_flags = RSS_ENABLE_NONE;
3631 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3632 128, rss->rss_hkey);
3633 }
Sathya Perla482c9e72011-06-29 23:33:17 +00003634}
3635
Kalesh APbcc84142015-08-05 03:27:48 -04003636static void be_disable_if_filters(struct be_adapter *adapter)
3637{
Ivan Vecera02434de2017-01-13 22:38:28 +01003638 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3639 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
Ivan Vecera6129fd92017-01-31 20:01:31 +01003640 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
Ivan Vecera02434de2017-01-13 22:38:28 +01003641 be_dev_mac_del(adapter, adapter->pmac_id[0]);
Ivan Vecera6129fd92017-01-31 20:01:31 +01003642 eth_zero_addr(adapter->dev_mac);
3643 }
Ivan Vecera02434de2017-01-13 22:38:28 +01003644
Kalesh APbcc84142015-08-05 03:27:48 -04003645 be_clear_uc_list(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04003646 be_clear_mc_list(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003647
3648 /* The IFACE flags are enabled in the open path and cleared
3649 * in the close path. When a VF gets detached from the host and
3650 * assigned to a VM the following happens:
3651 * - VF's IFACE flags get cleared in the detach path
3652 * - IFACE create is issued by the VF in the attach path
3653 * Due to a bug in the BE3/Skyhawk-R FW
3654 * (Lancer FW doesn't have the bug), the IFACE capability flags
3655 * specified along with the IFACE create cmd issued by a VF are not
3656 * honoured by FW. As a consequence, if a *new* driver
3657 * (that enables/disables IFACE flags in open/close)
 3658 * is loaded in the host and an *old* driver is used by a VM/VF,
3659 * the IFACE gets created *without* the needed flags.
3660 * To avoid this, disable RX-filter flags only for Lancer.
3661 */
3662 if (lancer_chip(adapter)) {
3663 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3664 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3665 }
3666}
3667
Sathya Perla889cd4b2010-05-30 23:33:45 +00003668static int be_close(struct net_device *netdev)
3669{
3670 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003671 struct be_eq_obj *eqo;
3672 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003673
Kalesh APe1ad8e32014-04-14 16:12:41 +05303674 /* This protection is needed as be_close() may be called even when the
 3675 * adapter is in a cleared state (after an EEH perm failure)
3676 */
3677 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3678 return 0;
3679
Sathya Perlab7172412016-07-27 05:26:18 -04003680 /* Before attempting cleanup ensure all the pending cmds in the
3681 * config_wq have finished execution
3682 */
3683 flush_workqueue(be_wq);
3684
Kalesh APbcc84142015-08-05 03:27:48 -04003685 be_disable_if_filters(adapter);
3686
Ivan Veceradff345c52013-11-27 08:59:32 +01003687 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3688 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003689 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303690 be_disable_busy_poll(eqo);
3691 }
David S. Miller71237b62013-11-28 18:53:36 -05003692 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003693 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003694
3695 be_async_mcc_disable(adapter);
3696
3697 /* Wait for all pending tx completions to arrive so that
3698 * all tx skbs are freed.
3699 */
Sathya Perlafba87552013-05-08 02:05:50 +00003700 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303701 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003702
3703 be_rx_qs_destroy(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003704
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003705 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003706 if (msix_enabled(adapter))
3707 synchronize_irq(be_msix_vec_get(adapter, eqo));
3708 else
3709 synchronize_irq(netdev->irq);
3710 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003711 }
3712
Sathya Perla889cd4b2010-05-30 23:33:45 +00003713 be_irq_unregister(adapter);
3714
Sathya Perla482c9e72011-06-29 23:33:17 +00003715 return 0;
3716}
3717
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003718static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003719{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003720 struct rss_info *rss = &adapter->rss_info;
3721 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003722 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003723 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003724
3725 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003726 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3727 sizeof(struct be_eth_rx_d));
3728 if (rc)
3729 return rc;
3730 }
3731
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003732 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3733 rxo = default_rxo(adapter);
3734 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3735 rx_frag_size, adapter->if_handle,
3736 false, &rxo->rss_id);
3737 if (rc)
3738 return rc;
3739 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003740
3741 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003742 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003743 rx_frag_size, adapter->if_handle,
3744 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003745 if (rc)
3746 return rc;
3747 }
3748
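 /* Stripe the RSS queue ids round-robin across all
  * RSS_INDIR_TABLE_LEN entries of the indirection table.
  */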
3749 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003750 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003751 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303752 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003753 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303754 rss->rsstable[j + i] = rxo->rss_id;
3755 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003756 }
3757 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303758 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3759 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003760
3761 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303762 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3763 RSS_ENABLE_UDP_IPV6;
Ajit Khaparde62219062016-02-10 22:45:53 +05303764
3765 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3766 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3767 RSS_INDIR_TABLE_LEN, rss_key);
3768 if (rc) {
3769 rss->rss_flags = RSS_ENABLE_NONE;
3770 return rc;
3771 }
3772
3773 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303774 } else {
3775 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303776 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303777 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003778
Venkata Duvvurue2557872014-04-21 15:38:00 +05303779
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003780 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3781 * which is a queue empty condition
3782 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003783 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003784 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3785
Sathya Perla889cd4b2010-05-30 23:33:45 +00003786 return 0;
3787}
3788
Kalesh APbcc84142015-08-05 03:27:48 -04003789static int be_enable_if_filters(struct be_adapter *adapter)
3790{
3791 int status;
3792
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003793 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003794 if (status)
3795 return status;
3796
Ivan Vecera6129fd92017-01-31 20:01:31 +01003797 /* Normally this condition is true, as ->dev_mac is zeroed.
3798 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3799 * subsequent be_dev_mac_add() can fail (after fresh boot)
3800 */
3801 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3802 int old_pmac_id = -1;
3803
3804 /* Remember old programmed MAC if any - can happen on BE3 VF */
3805 if (!is_zero_ether_addr(adapter->dev_mac))
3806 old_pmac_id = adapter->pmac_id[0];
3807
Suresh Reddy988d44b2016-09-07 19:57:52 +05303808 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003809 if (status)
3810 return status;
Ivan Vecera6129fd92017-01-31 20:01:31 +01003811
3812 /* Delete the old programmed MAC as we successfully programmed
3813 * a new MAC
3814 */
3815 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3816 be_dev_mac_del(adapter, old_pmac_id);
3817
Suresh Reddyc27ebf52016-09-07 19:57:53 +05303818 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003819 }
3820
3821 if (adapter->vlans_added)
3822 be_vid_config(adapter);
3823
Sathya Perlab7172412016-07-27 05:26:18 -04003824 __be_set_rx_mode(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003825
3826 return 0;
3827}
3828
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003829static int be_open(struct net_device *netdev)
3830{
3831 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003832 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003833 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003834 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003835 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003836 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003837
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003838 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003839 if (status)
3840 goto err;
3841
Kalesh APbcc84142015-08-05 03:27:48 -04003842 status = be_enable_if_filters(adapter);
3843 if (status)
3844 goto err;
3845
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003846 status = be_irq_register(adapter);
3847 if (status)
3848 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003849
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003850 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003851 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003852
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003853 for_all_tx_queues(adapter, txo, i)
3854 be_cq_notify(adapter, txo->cq.id, true, 0);
3855
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003856 be_async_mcc_enable(adapter);
3857
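 /* Enable NAPI and busy-poll on each EQ and arm the EQs so that event
  * interrupts start firing.
  */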
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003858 for_all_evt_queues(adapter, eqo, i) {
3859 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303860 be_enable_busy_poll(eqo);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003861 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003862 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003863 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003864
Sathya Perla323ff712012-09-28 04:39:43 +00003865 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003866 if (!status)
3867 be_link_status_update(adapter, link_status);
3868
Sathya Perlafba87552013-05-08 02:05:50 +00003869 netif_tx_start_all_queues(netdev);
Sathya Perlac9c47142014-03-27 10:46:19 +05303870 if (skyhawk_chip(adapter))
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07003871 udp_tunnel_get_rx_info(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303872
Sathya Perla889cd4b2010-05-30 23:33:45 +00003873 return 0;
3874err:
3875 be_close(adapter->netdev);
3876 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003877}
3878
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003879static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3880{
3881 u32 addr;
3882
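 /* Hash the PF MAC to derive the low 3 bytes of the seed MAC; the OUI
  * (high 3 bytes) of the PF MAC is retained below.
  */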
3883 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3884
3885 mac[5] = (u8)(addr & 0xFF);
3886 mac[4] = (u8)((addr >> 8) & 0xFF);
3887 mac[3] = (u8)((addr >> 16) & 0xFF);
3888 /* Use the OUI from the current MAC address */
3889 memcpy(mac, adapter->netdev->dev_addr, 3);
3890}
3891
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003892/*
3893 * Generate a seed MAC address from the PF MAC Address using jhash.
3894 * MAC Address for VFs are assigned incrementally starting from the seed.
3895 * These addresses are programmed in the ASIC by the PF and the VF driver
3896 * queries for the MAC address during its probe.
3897 */
Sathya Perla4c876612013-02-03 20:30:11 +00003898static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003899{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003900 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003901 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003902 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003903 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003904
3905 be_vf_eth_addr_generate(adapter, mac);
3906
Sathya Perla11ac75e2011-12-13 00:58:50 +00003907 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303908 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003909 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003910 vf_cfg->if_handle,
3911 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303912 else
3913 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3914 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003915
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003916 if (status)
3917 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303918 "Mac address assignment failed for VF %d\n",
3919 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003920 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003921 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003922
3923 mac[5] += 1;
3924 }
3925 return status;
3926}
3927
Sathya Perla4c876612013-02-03 20:30:11 +00003928static int be_vfs_mac_query(struct be_adapter *adapter)
3929{
3930 int status, vf;
3931 u8 mac[ETH_ALEN];
3932 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003933
3934 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303935 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3936 mac, vf_cfg->if_handle,
3937 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003938 if (status)
3939 return status;
3940 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3941 }
3942 return 0;
3943}
3944
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003945static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003946{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003947 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003948 u32 vf;
3949
Sathya Perla257a3fe2013-06-14 15:54:51 +05303950 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003951 dev_warn(&adapter->pdev->dev,
3952 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003953 goto done;
3954 }
3955
Sathya Perlab4c1df92013-05-08 02:05:47 +00003956 pci_disable_sriov(adapter->pdev);
3957
Sathya Perla11ac75e2011-12-13 00:58:50 +00003958 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303959 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003960 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3961 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303962 else
3963 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3964 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003965
Sathya Perla11ac75e2011-12-13 00:58:50 +00003966 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3967 }
Somnath Kotur884476b2016-06-22 08:54:55 -04003968
3969 if (BE3_chip(adapter))
3970 be_cmd_set_hsw_config(adapter, 0, 0,
3971 adapter->if_handle,
3972 PORT_FWD_TYPE_PASSTHRU, 0);
Sathya Perla39f1d942012-05-08 19:41:24 +00003973done:
3974 kfree(adapter->vf_cfg);
3975 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303976 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003977}
3978
Sathya Perla77071332013-08-27 16:57:34 +05303979static void be_clear_queues(struct be_adapter *adapter)
3980{
3981 be_mcc_queues_destroy(adapter);
3982 be_rx_cqs_destroy(adapter);
3983 be_tx_queues_destroy(adapter);
3984 be_evt_queues_destroy(adapter);
3985}
3986
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303987static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003988{
Sathya Perla191eb752012-02-23 18:50:13 +00003989 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3990 cancel_delayed_work_sync(&adapter->work);
3991 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3992 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303993}
3994
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003995static void be_cancel_err_detection(struct be_adapter *adapter)
3996{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303997 struct be_error_recovery *err_rec = &adapter->error_recovery;
3998
3999 if (!be_err_recovery_workq)
4000 return;
4001
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004002 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304003 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004004 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
4005 }
4006}
4007
Sathya Perlac9c47142014-03-27 10:46:19 +05304008static void be_disable_vxlan_offloads(struct be_adapter *adapter)
4009{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004010 struct net_device *netdev = adapter->netdev;
4011
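 /* Convert the IFACE back from tunnel to normal mode, clear the
  * stored VxLAN port and withdraw the tunnel offload features from
  * the netdev.
  */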
Sathya Perlac9c47142014-03-27 10:46:19 +05304012 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
4013 be_cmd_manage_iface(adapter, adapter->if_handle,
4014 OP_CONVERT_TUNNEL_TO_NORMAL);
4015
4016 if (adapter->vxlan_port)
4017 be_cmd_set_vxlan_port(adapter, 0);
4018
4019 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
4020 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004021
4022 netdev->hw_enc_features = 0;
4023 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304024 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05304025}
4026
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004027static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4028 struct be_resources *vft_res)
Vasundhara Volamf2858732015-03-04 00:44:33 -05004029{
4030 struct be_resources res = adapter->pool_res;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004031 u32 vf_if_cap_flags = res.vf_if_cap_flags;
4032 struct be_resources res_mod = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05004033 u16 num_vf_qs = 1;
4034
Somnath Koturde2b1e02016-06-06 07:22:10 -04004035 /* Distribute the queue resources among the PF and its VFs */
4036 if (num_vfs) {
4037 /* Divide the rx queues evenly among the VFs and the PF, capped
4038 * at VF-EQ-count. Any remainder queues belong to the PF.
4039 */
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05304040 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4041 res.max_rss_qs / (num_vfs + 1));
Vasundhara Volamf2858732015-03-04 00:44:33 -05004042
Somnath Koturde2b1e02016-06-06 07:22:10 -04004043 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
 4044 * RSS Tables per port. Provide RSS on VFs only if the number of
 4045 * VFs requested is less than its PF Pool's RSS Tables limit.
Vasundhara Volamf2858732015-03-04 00:44:33 -05004046 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004047 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
Vasundhara Volamf2858732015-03-04 00:44:33 -05004048 num_vf_qs = 1;
4049 }
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004050
4051 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4052 * which are modifiable using SET_PROFILE_CONFIG cmd.
4053 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004054 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4055 RESOURCE_MODIFIABLE, 0);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004056
4057 /* If RSS IFACE capability flags are modifiable for a VF, set the
4058 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4059 * more than 1 RSSQ is available for a VF.
4060 * Otherwise, provision only 1 queue pair for VF.
4061 */
4062 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4063 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4064 if (num_vf_qs > 1) {
4065 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4066 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4067 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4068 } else {
4069 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4070 BE_IF_FLAGS_DEFQ_RSS);
4071 }
4072 } else {
4073 num_vf_qs = 1;
4074 }
4075
4076 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4077 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4078 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4079 }
4080
4081 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4082 vft_res->max_rx_qs = num_vf_qs;
4083 vft_res->max_rss_qs = num_vf_qs;
4084 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4085 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4086
4087 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4088 * among the PF and it's VFs, if the fields are changeable
 4089 * among the PF and its VFs, if the fields are changeable
4090 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4091 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4092
4093 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4094 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4095
4096 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4097 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4098
4099 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4100 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004101}
4102
Sathya Perlab7172412016-07-27 05:26:18 -04004103static void be_if_destroy(struct be_adapter *adapter)
4104{
4105 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4106
4107 kfree(adapter->pmac_id);
4108 adapter->pmac_id = NULL;
4109
4110 kfree(adapter->mc_list);
4111 adapter->mc_list = NULL;
4112
4113 kfree(adapter->uc_list);
4114 adapter->uc_list = NULL;
4115}
4116
Somnath Koturb05004a2013-12-05 12:08:16 +05304117static int be_clear(struct be_adapter *adapter)
4118{
Vasundhara Volamf2858732015-03-04 00:44:33 -05004119 struct pci_dev *pdev = adapter->pdev;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004120 struct be_resources vft_res = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05004121
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304122 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00004123
Sathya Perlab7172412016-07-27 05:26:18 -04004124 flush_workqueue(be_wq);
4125
Sathya Perla11ac75e2011-12-13 00:58:50 +00004126 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004127 be_vf_clear(adapter);
4128
Vasundhara Volambec84e62014-06-30 13:01:32 +05304129 /* Re-configure FW to distribute resources evenly across max-supported
4130 * number of VFs, only when VFs are not already enabled.
4131 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05004132 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4133 !pci_vfs_assigned(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004134 be_calculate_vf_res(adapter,
4135 pci_sriov_get_totalvfs(pdev),
4136 &vft_res);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304137 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004138 pci_sriov_get_totalvfs(pdev),
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004139 &vft_res);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004140 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304141
Sathya Perlac9c47142014-03-27 10:46:19 +05304142 be_disable_vxlan_offloads(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004143
Sathya Perlab7172412016-07-27 05:26:18 -04004144 be_if_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004145
Sathya Perla77071332013-08-27 16:57:34 +05304146 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004147
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004148 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304149 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004150 return 0;
4151}
4152
Sathya Perla4c876612013-02-03 20:30:11 +00004153static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004154{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304155 struct be_resources res = {0};
Kalesh APbcc84142015-08-05 03:27:48 -04004156 u32 cap_flags, en_flags, vf;
Sathya Perla4c876612013-02-03 20:30:11 +00004157 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05004158 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004159
Kalesh AP0700d812015-01-20 03:51:43 -05004160 /* If a FW profile exists, then cap_flags are updated */
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004161 cap_flags = BE_VF_IF_EN_FLAGS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004162
Sathya Perla4c876612013-02-03 20:30:11 +00004163 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304164 if (!BE3_chip(adapter)) {
Somnath Koturde2b1e02016-06-06 07:22:10 -04004165 status = be_cmd_get_profile_config(adapter, &res, NULL,
4166 ACTIVE_PROFILE_TYPE,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004167 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05304168 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004169 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304170 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04004171 /* Prevent VFs from enabling VLAN promiscuous
4172 * mode
4173 */
4174 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4175 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304176 }
Sathya Perla4c876612013-02-03 20:30:11 +00004177
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004178 /* PF should enable IF flags during proxy if_create call */
4179 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
Kalesh APbcc84142015-08-05 03:27:48 -04004180 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4181 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00004182 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05004183 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00004184 }
Kalesh AP0700d812015-01-20 03:51:43 -05004185
4186 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004187}
4188
Sathya Perla39f1d942012-05-08 19:41:24 +00004189static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004190{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004191 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004192 int vf;
4193
Sathya Perla39f1d942012-05-08 19:41:24 +00004194 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4195 GFP_KERNEL);
4196 if (!adapter->vf_cfg)
4197 return -ENOMEM;
4198
Sathya Perla11ac75e2011-12-13 00:58:50 +00004199 for_all_vfs(adapter, vf_cfg, vf) {
4200 vf_cfg->if_handle = -1;
4201 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004202 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004203 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004204}
4205
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004206static int be_vf_setup(struct be_adapter *adapter)
4207{
Sathya Perla4c876612013-02-03 20:30:11 +00004208 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05304209 struct be_vf_cfg *vf_cfg;
4210 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04004211 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004212
Sathya Perla257a3fe2013-06-14 15:54:51 +05304213 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00004214
4215 status = be_vf_setup_init(adapter);
4216 if (status)
4217 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00004218
Sathya Perla4c876612013-02-03 20:30:11 +00004219 if (old_vfs) {
4220 for_all_vfs(adapter, vf_cfg, vf) {
4221 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4222 if (status)
4223 goto err;
4224 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004225
Sathya Perla4c876612013-02-03 20:30:11 +00004226 status = be_vfs_mac_query(adapter);
4227 if (status)
4228 goto err;
4229 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05304230 status = be_vfs_if_create(adapter);
4231 if (status)
4232 goto err;
4233
Sathya Perla39f1d942012-05-08 19:41:24 +00004234 status = be_vf_eth_addr_config(adapter);
4235 if (status)
4236 goto err;
4237 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004238
Sathya Perla11ac75e2011-12-13 00:58:50 +00004239 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05304240 /* Allow VFs to program MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04004241 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4242 vf + 1);
4243 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05304244 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04004245 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05304246 BE_PRIV_FILTMGMT,
4247 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004248 if (!status) {
4249 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05304250 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4251 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004252 }
Sathya Perla04a06022013-07-23 15:25:00 +05304253 }
4254
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304255 /* Allow full available bandwidth */
4256 if (!old_vfs)
4257 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00004258
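 /* Cache the VF's current spoof-check setting from the HSW config so
  * it can be reported back to the stack later.
  */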
Kalesh APe7bcbd72015-05-06 05:30:32 -04004259 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4260 vf_cfg->if_handle, NULL,
4261 &spoofchk);
4262 if (!status)
4263 vf_cfg->spoofchk = spoofchk;
4264
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304265 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05304266 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304267 be_cmd_set_logical_link_config(adapter,
4268 IFLA_VF_LINK_STATE_AUTO,
4269 vf+1);
4270 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004271 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00004272
4273 if (!old_vfs) {
4274 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4275 if (status) {
4276 dev_err(dev, "SRIOV enable failed\n");
4277 adapter->num_vfs = 0;
4278 goto err;
4279 }
4280 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304281
Somnath Kotur884476b2016-06-22 08:54:55 -04004282 if (BE3_chip(adapter)) {
4283 /* On BE3, enable VEB only when SRIOV is enabled */
4284 status = be_cmd_set_hsw_config(adapter, 0, 0,
4285 adapter->if_handle,
4286 PORT_FWD_TYPE_VEB, 0);
4287 if (status)
4288 goto err;
4289 }
4290
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304291 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004292 return 0;
4293err:
Sathya Perla4c876612013-02-03 20:30:11 +00004294 dev_err(dev, "VF setup failed\n");
4295 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004296 return status;
4297}
4298
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304299/* Converting function_mode bits on BE3 to SH mc_type enums */
4300
4301static u8 be_convert_mc_type(u32 function_mode)
4302{
Suresh Reddy66064db2014-06-23 16:41:29 +05304303 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304304 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304305 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304306 return FLEX10;
4307 else if (function_mode & VNIC_MODE)
4308 return vNIC2;
4309 else if (function_mode & UMC_ENABLED)
4310 return UMC;
4311 else
4312 return MC_NONE;
4313}
4314
Sathya Perla92bf14a2013-08-27 16:57:32 +05304315/* On BE2/BE3 FW does not suggest the supported limits */
4316static void BEx_get_resources(struct be_adapter *adapter,
4317 struct be_resources *res)
4318{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304319 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304320
4321 if (be_physfn(adapter))
4322 res->max_uc_mac = BE_UC_PMAC_COUNT;
4323 else
4324 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4325
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304326 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4327
4328 if (be_is_mc(adapter)) {
4329 /* Assuming that there are 4 channels per port,
4330 * when multi-channel is enabled
4331 */
4332 if (be_is_qnq_mode(adapter))
4333 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4334 else
4335 /* In a non-qnq multichannel mode, the pvid
4336 * takes up one vlan entry
4337 */
4338 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4339 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304340 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304341 }
4342
Sathya Perla92bf14a2013-08-27 16:57:32 +05304343 res->max_mcast_mac = BE_MAX_MC;
4344
Vasundhara Volama5243da2014-03-11 18:53:07 +05304345 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4346 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4347 * *only* if it is RSS-capable.
4348 */
4349 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004350 be_virtfn(adapter) ||
4351 (be_is_mc(adapter) &&
4352 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304353 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304354 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4355 struct be_resources super_nic_res = {0};
4356
4357 /* On a SuperNIC profile, the driver needs to use the
4358 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4359 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004360 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4361 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4362 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304363 /* Some old versions of BE3 FW don't report max_tx_qs value */
4364 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4365 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304366 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304367 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304368
4369 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4370 !use_sriov && be_physfn(adapter))
4371 res->max_rss_qs = (adapter->be3_native) ?
4372 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4373 res->max_rx_qs = res->max_rss_qs + 1;
4374
Suresh Reddye3dc8672014-01-06 13:02:25 +05304375 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304376 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304377 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4378 else
4379 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304380
4381 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004382 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304383 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4384 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4385}
4386
Sathya Perla30128032011-11-10 19:17:57 +00004387static void be_setup_init(struct be_adapter *adapter)
4388{
4389 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004390 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004391 adapter->if_handle = -1;
4392 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004393 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304394 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004395 if (be_physfn(adapter))
4396 adapter->cmd_privileges = MAX_PRIVILEGES;
4397 else
4398 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004399}
4400
Somnath Koturde2b1e02016-06-06 07:22:10 -04004401/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4402 * However, this HW limitation is not exposed to the host via any SLI cmd.
4403 * As a result, in the case of SRIOV and in particular multi-partition configs
 4404 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 4405 * for distribution between the VFs. This self-imposed limit will determine the
 4406 * number of VFs for which RSS can be enabled.
4407 */
Baoyou Xied766e7e2016-09-18 16:35:29 +08004408static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
Somnath Koturde2b1e02016-06-06 07:22:10 -04004409{
4410 struct be_port_resources port_res = {0};
4411 u8 rss_tables_on_port;
4412 u16 max_vfs = be_max_vfs(adapter);
4413
4414 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4415 RESOURCE_LIMITS, 0);
4416
4417 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4418
4419 /* Each PF Pool's RSS Tables limit =
4420 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4421 */
4422 adapter->pool_res.max_rss_tables =
4423 max_vfs * rss_tables_on_port / port_res.max_vfs;
4424}
4425
Vasundhara Volambec84e62014-06-30 13:01:32 +05304426static int be_get_sriov_config(struct be_adapter *adapter)
4427{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304428 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304429 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304430
Somnath Koturde2b1e02016-06-06 07:22:10 -04004431 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4432 RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304433
Vasundhara Volamace40af2015-03-04 00:44:34 -05004434 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304435 if (BE3_chip(adapter) && !res.max_vfs) {
4436 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4437 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4438 }
4439
Sathya Perlad3d18312014-08-01 17:47:30 +05304440 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304441
Vasundhara Volamace40af2015-03-04 00:44:34 -05004442 /* If during previous unload of the driver, the VFs were not disabled,
4443 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4444 * Instead use the TotalVFs value stored in the pci-dev struct.
4445 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304446 old_vfs = pci_num_vf(adapter->pdev);
4447 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004448 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4449 old_vfs);
4450
4451 adapter->pool_res.max_vfs =
4452 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304453 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304454 }
4455
Somnath Koturde2b1e02016-06-06 07:22:10 -04004456 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4457 be_calculate_pf_pool_rss_tables(adapter);
4458 dev_info(&adapter->pdev->dev,
4459 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4460 be_max_pf_pool_rss_tables(adapter));
4461 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304462 return 0;
4463}
4464
Vasundhara Volamace40af2015-03-04 00:44:34 -05004465static void be_alloc_sriov_res(struct be_adapter *adapter)
4466{
4467 int old_vfs = pci_num_vf(adapter->pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004468 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05004469 int status;
4470
4471 be_get_sriov_config(adapter);
4472
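 /* Advertise the PF pool's VF limit via sysfs (sriov_totalvfs) when
  * no VFs are enabled yet.
  */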
4473 if (!old_vfs)
4474 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4475
 4476 /* When the HW is in an SRIOV-capable configuration, the PF-pool
 4477 * resources are given to the PF during driver load, if there are no
4478 * old VFs. This facility is not available in BE3 FW.
4479 * Also, this is done by FW in Lancer chip.
4480 */
4481 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004482 be_calculate_vf_res(adapter, 0, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004483 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004484 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004485 if (status)
4486 dev_err(&adapter->pdev->dev,
4487 "Failed to optimize SRIOV resources\n");
4488 }
4489}
4490
Sathya Perla92bf14a2013-08-27 16:57:32 +05304491static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004492{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304493 struct device *dev = &adapter->pdev->dev;
4494 struct be_resources res = {0};
4495 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004496
Sathya Perla92bf14a2013-08-27 16:57:32 +05304497 /* For Lancer, SH etc read per-function resource limits from FW.
4498 * GET_FUNC_CONFIG returns per function guaranteed limits.
 4499 * GET_PROFILE_CONFIG returns PCI-E related limits, i.e. PF-pool limits.
4500 */
Sathya Perlace7faf02016-06-22 08:54:53 -04004501 if (BEx_chip(adapter)) {
4502 BEx_get_resources(adapter, &res);
4503 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304504 status = be_cmd_get_func_config(adapter, &res);
4505 if (status)
4506 return status;
4507
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004508 /* If a default RXQ must be created, we'll use up one RSSQ */
4509 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4510 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4511 res.max_rss_qs -= 1;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004512 }
4513
Sathya Perlace7faf02016-06-22 08:54:53 -04004514 /* If RoCE is supported stash away half the EQs for RoCE */
4515 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4516 res.max_evt_qs / 2 : res.max_evt_qs;
4517 adapter->res = res;
4518
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004519 /* If FW supports RSS default queue, then skip creating non-RSS
4520 * queue for non-IP traffic.
4521 */
4522 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4523 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4524
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304525 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4526 be_max_txqs(adapter), be_max_rxqs(adapter),
Sathya Perlace7faf02016-06-22 08:54:53 -04004527 be_max_rss(adapter), be_max_nic_eqs(adapter),
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304528 be_max_vfs(adapter));
4529 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4530 be_max_uc(adapter), be_max_mc(adapter),
4531 be_max_vlans(adapter));
4532
Sathya Perlae2617682016-06-22 08:54:54 -04004533 /* Ensure RX and TX queues are created in pairs at init time */
4534 adapter->cfg_num_rx_irqs =
4535 min_t(u16, netif_get_num_default_rss_queues(),
4536 be_max_qp_irqs(adapter));
4537 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304538 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004539}
4540
Sathya Perla39f1d942012-05-08 19:41:24 +00004541static int be_get_config(struct be_adapter *adapter)
4542{
Sathya Perla6b085ba2015-02-23 04:20:09 -05004543 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05304544 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05004545
Suresh Reddy980df242015-12-30 01:29:03 -05004546 status = be_cmd_get_cntl_attributes(adapter);
4547 if (status)
4548 return status;
4549
Kalesh APe97e3cd2014-07-17 16:20:26 +05304550 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004551 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304552 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004553
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05004554 if (!lancer_chip(adapter) && be_physfn(adapter))
4555 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4556
Sathya Perla6b085ba2015-02-23 04:20:09 -05004557 if (BEx_chip(adapter)) {
4558 level = be_cmd_get_fw_log_level(adapter);
4559 adapter->msg_enable =
4560 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4561 }
4562
4563 be_cmd_get_acpi_wol_cap(adapter);
Sriharsha Basavapatna45f13df2016-06-06 07:22:09 -04004564 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4565 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
Sathya Perla6b085ba2015-02-23 04:20:09 -05004566
Vasundhara Volam21252372015-02-06 08:18:42 -05004567 be_cmd_query_port_name(adapter);
4568
4569 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05304570 status = be_cmd_get_active_profile(adapter, &profile_id);
4571 if (!status)
4572 dev_info(&adapter->pdev->dev,
4573 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05304574 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304575
Sathya Perla92bf14a2013-08-27 16:57:32 +05304576 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00004577}
4578
Sathya Perla95046b92013-07-23 15:25:02 +05304579static int be_mac_setup(struct be_adapter *adapter)
4580{
4581 u8 mac[ETH_ALEN];
4582 int status;
4583
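 /* A zero netdev MAC means this is the first setup; fetch the
  * permanent MAC from FW and use it as both dev_addr and perm_addr.
  */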
4584 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4585 status = be_cmd_get_perm_mac(adapter, mac);
4586 if (status)
4587 return status;
4588
4589 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4590 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Ivan Vecera6129fd92017-01-31 20:01:31 +01004591
4592 /* Initial MAC for BE3 VFs is already programmed by PF */
4593 if (BEx_chip(adapter) && be_virtfn(adapter))
4594 memcpy(adapter->dev_mac, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304595 }
4596
Sathya Perla95046b92013-07-23 15:25:02 +05304597 return 0;
4598}
4599
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304600static void be_schedule_worker(struct be_adapter *adapter)
4601{
Sathya Perlab7172412016-07-27 05:26:18 -04004602 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304603 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4604}
4605
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304606static void be_destroy_err_recovery_workq(void)
4607{
4608 if (!be_err_recovery_workq)
4609 return;
4610
4611 flush_workqueue(be_err_recovery_workq);
4612 destroy_workqueue(be_err_recovery_workq);
4613 be_err_recovery_workq = NULL;
4614}
4615
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304616static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004617{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304618 struct be_error_recovery *err_rec = &adapter->error_recovery;
4619
4620 if (!be_err_recovery_workq)
4621 return;
4622
4623 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4624 msecs_to_jiffies(delay));
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004625 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4626}
4627
Sathya Perla77071332013-08-27 16:57:34 +05304628static int be_setup_queues(struct be_adapter *adapter)
4629{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304630 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304631 int status;
4632
4633 status = be_evt_queues_create(adapter);
4634 if (status)
4635 goto err;
4636
4637 status = be_tx_qs_create(adapter);
4638 if (status)
4639 goto err;
4640
4641 status = be_rx_cqs_create(adapter);
4642 if (status)
4643 goto err;
4644
4645 status = be_mcc_queues_create(adapter);
4646 if (status)
4647 goto err;
4648
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304649 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4650 if (status)
4651 goto err;
4652
4653 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4654 if (status)
4655 goto err;
4656
Sathya Perla77071332013-08-27 16:57:34 +05304657 return 0;
4658err:
4659 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4660 return status;
4661}
4662
Ajit Khaparde62219062016-02-10 22:45:53 +05304663static int be_if_create(struct be_adapter *adapter)
4664{
4665 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4666 u32 cap_flags = be_if_cap_flags(adapter);
4667 int status;
4668
Sathya Perlab7172412016-07-27 05:26:18 -04004669 /* alloc required memory for other filtering fields */
4670 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4671 sizeof(*adapter->pmac_id), GFP_KERNEL);
4672 if (!adapter->pmac_id)
4673 return -ENOMEM;
4674
4675 adapter->mc_list = kcalloc(be_max_mc(adapter),
4676 sizeof(*adapter->mc_list), GFP_KERNEL);
4677 if (!adapter->mc_list)
4678 return -ENOMEM;
4679
4680 adapter->uc_list = kcalloc(be_max_uc(adapter),
4681 sizeof(*adapter->uc_list), GFP_KERNEL);
4682 if (!adapter->uc_list)
4683 return -ENOMEM;
4684
Sathya Perlae2617682016-06-22 08:54:54 -04004685 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304686 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4687
4688 en_flags &= cap_flags;
4689 /* will enable all the needed filter flags in be_open() */
4690 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4691 &adapter->if_handle, 0);
4692
Sathya Perlab7172412016-07-27 05:26:18 -04004693 if (status)
4694 return status;
4695
4696 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304697}
4698
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304699int be_update_queues(struct be_adapter *adapter)
4700{
4701 struct net_device *netdev = adapter->netdev;
4702 int status;
4703
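 /* Re-create the IFACE and all queues from scratch (e.g. when queue
  * counts change): close the netdev, tear down the queues and IFACE,
  * then rebuild both and reopen.
  */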
4704 if (netif_running(netdev))
4705 be_close(netdev);
4706
4707 be_cancel_worker(adapter);
4708
4709 /* If any vectors have been shared with RoCE we cannot re-program
4710 * the MSIx table.
4711 */
4712 if (!adapter->num_msix_roce_vec)
4713 be_msix_disable(adapter);
4714
4715 be_clear_queues(adapter);
Ajit Khaparde62219062016-02-10 22:45:53 +05304716 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4717 if (status)
4718 return status;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304719
4720 if (!msix_enabled(adapter)) {
4721 status = be_msix_enable(adapter);
4722 if (status)
4723 return status;
4724 }
4725
Ajit Khaparde62219062016-02-10 22:45:53 +05304726 status = be_if_create(adapter);
4727 if (status)
4728 return status;
4729
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304730 status = be_setup_queues(adapter);
4731 if (status)
4732 return status;
4733
4734 be_schedule_worker(adapter);
4735
4736 if (netif_running(netdev))
4737 status = be_open(netdev);
4738
4739 return status;
4740}
4741
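/* Parse the leading major number from a "major.minor..." FW version
 * string, e.g. fw_major_num("4.2.324.0") == 4; returns 0 if no number
 * can be parsed.
 */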
Sathya Perlaf7062ee2015-02-06 08:18:35 -05004742static inline int fw_major_num(const char *fw_ver)
4743{
4744 int fw_major = 0, i;
4745
4746 i = sscanf(fw_ver, "%d.", &fw_major);
4747 if (i != 1)
4748 return 0;
4749
4750 return fw_major;
4751}
4752
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304753/* If it is error recovery, FLR the PF
4754 * Else if any VFs are already enabled don't FLR the PF
4755 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004756static bool be_reset_required(struct be_adapter *adapter)
4757{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304758 if (be_error_recovering(adapter))
4759 return true;
4760 else
4761 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004762}
4763
4764/* Wait for the FW to be ready and perform the required initialization */
4765static int be_func_init(struct be_adapter *adapter)
4766{
4767 int status;
4768
4769 status = be_fw_wait_ready(adapter);
4770 if (status)
4771 return status;
4772
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304773 /* FW is now ready; clear errors to allow cmds/doorbell */
4774 be_clear_error(adapter, BE_CLEAR_ALL);
4775
Sathya Perlaf962f842015-02-23 04:20:16 -05004776 if (be_reset_required(adapter)) {
4777 status = be_cmd_reset_function(adapter);
4778 if (status)
4779 return status;
4780
4781 /* Wait for interrupts to quiesce after an FLR */
4782 msleep(100);
Sathya Perlaf962f842015-02-23 04:20:16 -05004783 }
4784
4785 /* Tell FW we're ready to fire cmds */
4786 status = be_cmd_fw_init(adapter);
4787 if (status)
4788 return status;
4789
4790 /* Allow interrupts for other ULPs running on NIC function */
4791 be_intr_set(adapter, true);
4792
4793 return 0;
4794}
4795
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old (%s), IRQs may not work\n",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
	 * confusing any Linux bridge or OVS it may be connected to.
	 * Set the EVB to PASSTHRU mode, which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}

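/* netpoll hook: with interrupts unavailable, nudge every event queue
 * and schedule its NAPI handler so pending completions are reaped.
 */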
#ifdef CONFIG_NET_POLL_CONTROLLER
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif

int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}

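/* ndo_bridge_setlink: program the embedded switch to VEB mode (traffic
 * between VFs is switched inside the adapter) or VEPA mode (all traffic
 * is handed to the external switch) via the HSW config command.
 */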
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}

static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}

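/* MCC commands issued from contexts that cannot sleep (ndo_set_rx_mode,
 * udp_tunnel callbacks) are deferred to the be_wq workqueue; a typical
 * caller does:
 *
 *	work = be_alloc_work(adapter, be_work_set_rx_mode);
 *	if (work)
 *		queue_work(be_wq, &work->work);
 */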
static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
					 void (*func)(struct work_struct *))
{
	struct be_cmd_work *work;

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		dev_err(&adapter->pdev->dev,
			"be_work memory allocation failed\n");
		return NULL;
	}

	INIT_WORK(&work->work, func);
	work->adapter = adapter;
	return work;
}

/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	int status;

	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		goto done;
	}

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		goto done;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	goto done;
err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}

static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}

static void be_cfg_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti,
			      void (*func)(struct work_struct *))
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_cmd_work *cmd_work;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	cmd_work = be_alloc_work(adapter, func);
	if (cmd_work) {
		cmd_work->info.vxlan_port = ti->port;
		queue_work(be_wq, &cmd_work->work);
	}
}

static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}

static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}

static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done
	 * to allow other tunneled traffic such as GRE to work while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}

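/* Report a stable physical port id: one byte of (hba_port_num + 1)
 * followed by the controller serial number words in reverse order.
 */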
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}

static void be_set_rx_mode(struct net_device *dev)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct be_cmd_work *work;

	work = be_alloc_work(adapter, be_work_set_rx_mode);
	if (work)
		queue_work(be_wq, &work->work);
}

static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state	= be_set_vf_link_state,
	.ndo_set_vf_spoofchk	= be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}

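/* Detach the netdev and tear down adapter state; the suspend, EEH and
 * error-recovery paths call this before be_resume() rebuilds everything.
 */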
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}

static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}

static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}

static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}

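/* TPE (recoverable HW error) recovery state machine. Each invocation
 * advances one state and sets resched_delay for the next run from
 * be_err_detection_task(): -EAGAIN while recovery is still in progress,
 * 0 once the chip may be re-initialized, another -ve error to give up.
 */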
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;	/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}

static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}

static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check every second whether the PF has
		 * allocated resources.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}

static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}

static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}

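/* Doorbell BAR: BAR 0 on Lancer chips and on VFs, BAR 4 otherwise */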
static int db_bar(struct be_adapter *adapter)
{
	if (lancer_chip(adapter) || be_virtfn(adapter))
		return 0;
	else
		return 4;
}

static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}

static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_drv_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
	struct device *dev = &adapter->pdev->dev;

	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->rx_filter;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);

	mem = &adapter->stats_cmd;
	if (mem->va)
		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
}

/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

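	/* The mailbox is over-allocated by 16 bytes so that both its VA
	 * and DMA address can be aligned to a 16-byte boundary below.
	 */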
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static ssize_t be_hwmon_show_temp(struct device *dev,
				  struct device_attribute *dev_attr,
				  char *buf)
{
	struct be_adapter *adapter = dev_get_drvdata(dev);

	/* Unit: millidegree Celsius */
	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
		return -EIO;
	else
		return sprintf(buf, "%u\n",
			       adapter->hwmon_info.be_on_die_temp * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);

static char *mc_name(struct be_adapter *adapter)
{
	char *str = "";	/* default */

	switch (adapter->mc_type) {
	case UMC:
		str = "UMC";
		break;
	case FLEX10:
		str = "FLEX10";
		break;
	case vNIC1:
		str = "vNIC-1";
		break;
	case nPAR:
		str = "nPAR";
		break;
	case UFP:
		str = "UFP";
		break;
	case vNIC2:
		str = "vNIC-2";
		break;
	default:
		str = "";
	}

	return str;
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}

static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

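/* EEH (PCI error) handling: detach and clean up when an error is
 * detected, re-enable the device on slot reset, and rebuild state via
 * be_resume() when the platform signals resume.
 */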
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while a flash dump is in progress can cause
	 * it not to recover, so wait for any dump to finish. Wait only on
	 * the first function, as this is needed only once per adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

Vasundhara Volamace40af2015-03-04 00:44:34 -05006135static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
6136{
6137 struct be_adapter *adapter = pci_get_drvdata(pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04006138 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05006139 int status;
6140
6141 if (!num_vfs)
6142 be_vf_clear(adapter);
6143
6144 adapter->num_vfs = num_vfs;
6145
6146 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
6147 dev_warn(&pdev->dev,
6148 "Cannot disable VFs while they are assigned\n");
6149 return -EBUSY;
6150 }
6151
6152	/* When the HW is in an SR-IOV capable configuration, the PF-pool
6153	 * resources are distributed equally across the maximum number of
6154	 * VFs. The user may request that only a subset of the max VFs be
6155	 * enabled; based on num_vfs, redistribute the resources across
6156	 * num_vfs so that each VF gets a larger share of them.
6157	 * This facility is not available in BE3 FW.
6158	 * On Lancer chips, the FW does this redistribution itself.
6159	 */
6160 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04006161 be_calculate_vf_res(adapter, adapter->num_vfs,
6162 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05006163 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04006164 adapter->num_vfs, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05006165 if (status)
6166 dev_err(&pdev->dev,
6167 "Failed to optimize SR-IOV resources\n");
6168 }
6169
6170 status = be_get_resources(adapter);
6171 if (status)
6172 return be_cmd_status(status);
6173
6174 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
6175 rtnl_lock();
6176 status = be_update_queues(adapter);
6177 rtnl_unlock();
6178 if (status)
6179 return be_cmd_status(status);
6180
6181 if (adapter->num_vfs)
6182 status = be_vf_setup(adapter);
6183
6184 if (!status)
6185 return adapter->num_vfs;
6186
6187 return 0;
6188}
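
/* For illustration only (not part of the driver): a minimal userspace
 * sketch of driving the callback above through the standard sriov_numvfs
 * attribute. The BDF in the path is a hypothetical example. Kept under
 * "#if 0" so it is never compiled into the module.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/bus/pci/devices/0000:04:00.0/sriov_numvfs";
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Request 8 VFs; write "0" instead to disable them again. */
	fprintf(f, "8\n");

	return fclose(f) ? 1 : 0;
}
#endif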
6189
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07006190static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00006191 .error_detected = be_eeh_err_detected,
6192 .slot_reset = be_eeh_reset,
6193 .resume = be_eeh_resume,
6194};
6195
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006196static struct pci_driver be_driver = {
6197 .name = DRV_NAME,
6198 .id_table = be_dev_ids,
6199 .probe = be_probe,
6200 .remove = be_remove,
6201 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05006202 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00006203 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05006204 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00006205 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006206};
6207
6208static int __init be_init_module(void)
6209{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306210 int status;
6211
Joe Perches8e95a202009-12-03 07:58:21 +00006212 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6213 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006214 printk(KERN_WARNING DRV_NAME
6215 " : Module param rx_frag_size must be 2048/4096/8192."
6216 " Using 2048\n");
6217 rx_frag_size = 2048;
6218 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006219
Vasundhara Volamace40af2015-03-04 00:44:34 -05006220 if (num_vfs > 0) {
6221		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
6222		pr_info(DRV_NAME " : Use the sysfs method to enable VFs\n");
6223 }
6224
Sathya Perlab7172412016-07-27 05:26:18 -04006225 be_wq = create_singlethread_workqueue("be_wq");
6226 if (!be_wq) {
6227		pr_warn(DRV_NAME ": workqueue creation failed\n");
6228		return -ENOMEM;
6229 }
6230
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306231 be_err_recovery_workq =
6232 create_singlethread_workqueue("be_err_recover");
6233 if (!be_err_recovery_workq)
6234		pr_warn(DRV_NAME ": Could not create error recovery workqueue\n");
6235
6236 status = pci_register_driver(&be_driver);
6237 if (status) {
6238 destroy_workqueue(be_wq);
6239 be_destroy_err_recovery_workq();
6240 }
6241 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006242}
6243module_init(be_init_module);
6244
6245static void __exit be_exit_module(void)
6246{
6247 pci_unregister_driver(&be_driver);
Sathya Perlab7172412016-07-27 05:26:18 -04006248
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306249 be_destroy_err_recovery_workq();
6250
Sathya Perlab7172412016-07-27 05:26:18 -04006251 if (be_wq)
6252 destroy_workqueue(be_wq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006253}
6254module_exit(be_exit_module);