blob: 5626908f3f7af0235c186abc2cc36914f6c9af6b [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053044/* Per-module error detection/recovery workq shared across all functions.
45 * Each function schedules its own work request on this shared workq.
46 */
Wei Yongjune6053dd2016-09-25 15:40:36 +000047static struct workqueue_struct *be_err_recovery_workq;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053048
/* PCI device IDs claimed by this driver (BE_* and OC_* IDs are defined
 * in be.h / be_hw headers; covers BE2/BE3 and later OneConnect parts).
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
Sathya Perlab7172412016-07-27 05:26:18 -040061
62/* Workqueue used by all functions for defering cmd calls to the adapter */
Wei Yongjune6053dd2016-09-25 15:40:36 +000063static struct workqueue_struct *be_wq;
Sathya Perlab7172412016-07-27 05:26:18 -040064
/* UE Status Low CSR: per-bit block names used when decoding an
 * unrecoverable error (UE) report from the adapter.  Index == bit
 * position in the low status register.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +0530100
Ajit Khaparde7c185272010-07-29 06:16:33 +0000101/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -0700102static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +0000103 "LPCMEMHOST",
104 "MGMT_MAC",
105 "PCS0ONLINE",
106 "MPU_IRAM",
107 "PCS1ONLINE",
108 "PCTL0",
109 "PCTL1",
110 "PMEM",
111 "RR",
112 "TXPB",
113 "RXPP",
114 "XAUI",
115 "TXP",
116 "ARM",
117 "IPC",
118 "HOST2",
119 "HOST3",
120 "HOST4",
121 "HOST5",
122 "HOST6",
123 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530124 "ECRC",
125 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700126 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530127 "PERIPH",
128 "LLTXULP",
129 "D2P",
130 "RCON",
131 "LDMA",
132 "LLTXP",
133 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000134 "Unknown"
135};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -0500137#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
138 BE_IF_FLAGS_BROADCAST | \
139 BE_IF_FLAGS_MULTICAST | \
140 BE_IF_FLAGS_PASS_L3L4_ERRORS)
141
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530145
Sathya Perla1cfafab2012-02-23 18:50:15 +0000146 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
148 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000149 mem->va = NULL;
150 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151}
152
153static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530154 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
156 struct be_dma_mem *mem = &q->dma_mem;
157
158 memset(q, 0, sizeof(*q));
159 q->len = len;
160 q->entry_size = entry_size;
161 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700162 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
163 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000165 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return 0;
167}
168
/* Enable/disable host interrupt delivery via the membar interrupt
 * control register in PCI config space.  Read-modify-write; the write
 * is skipped when the HOSTINTR bit already matches the requested state.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
187
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188static void be_intr_set(struct be_adapter *adapter, bool enable)
189{
190 int status = 0;
191
192 /* On lancer interrupts can't be controlled via this register */
193 if (lancer_chip(adapter))
194 return;
195
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530196 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000197 return;
198
199 status = be_cmd_intr_set(adapter, enable);
200 if (status)
201 be_reg_intr_set(adapter, enable);
202}
203
/* Ring the RX queue doorbell: tell HW that @posted new receive buffers
 * were added to ring @qid.  The wmb() orders the descriptor-memory
 * writes before the doorbell MMIO write so HW never sees stale entries.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	/* Don't touch HW once a hardware error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
217
/* Ring the TX queue doorbell: tell HW that @posted new wrbs were added
 * to @txo's ring.  wmb() orders the wrb writes before the doorbell.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	/* Don't touch HW once a hardware error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
232
/* Notify the event queue doorbell: acknowledge @num_popped consumed
 * event entries on EQ @qid, optionally re-arming the EQ (@arm) and/or
 * clearing the interrupt (@clear_int).  @eq_delay_mult_enc is the
 * encoded interrupt-delay multiplier written to the R2I delay field
 * (used for adaptive interrupt moderation).
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch HW once a hardware error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
254
/* Notify the completion queue doorbell: acknowledge @num_popped
 * consumed CQ entries on CQ @qid, optionally re-arming the CQ (@arm)
 * so it raises another event when new completions arrive.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch HW once a hardware error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
271
Suresh Reddy988d44b2016-09-07 19:57:52 +0530272static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
273{
274 int i;
275
276 /* Check if mac has already been added as part of uc-list */
277 for (i = 0; i < adapter->uc_macs; i++) {
278 if (ether_addr_equal((u8 *)&adapter->uc_list[i * ETH_ALEN],
279 mac)) {
280 /* mac already added, skip addition */
281 adapter->pmac_id[0] = adapter->pmac_id[i + 1];
282 return 0;
283 }
284 }
285
286 return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
287 &adapter->pmac_id[0], 0);
288}
289
290static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
291{
292 int i;
293
294 /* Skip deletion if the programmed mac is
295 * being used in uc-list
296 */
297 for (i = 0; i < adapter->uc_macs; i++) {
298 if (adapter->pmac_id[i + 1] == pmac_id)
299 return;
300 }
301 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
302}
303
/* ndo_set_mac_address handler.
 *
 * Validates the new address, programs it on the interface (under
 * rx_filter_lock, since the uc-list shares pmac slots), deletes the old
 * primary MAC, and then confirms with the FW that the new MAC actually
 * became active — a VF without FILTMGMT privilege may have its PMAC_ADD
 * silently ignored, in which case -EPERM is returned.
 *
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	/* dev_mac caches the currently active MAC for the fast-path check
	 * at the top of this function
	 */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
375
Sathya Perlaca34fe32012-11-06 17:48:56 +0000376/* BE2 supports only v0 cmd */
377static void *hw_stats_from_cmd(struct be_adapter *adapter)
378{
379 if (BE2_chip(adapter)) {
380 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
381
382 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500383 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000384 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
385
386 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500387 } else {
388 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
389
390 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000391 }
392}
393
394/* BE2 supports only v0 cmd */
395static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
396{
397 if (BE2_chip(adapter)) {
398 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
399
400 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500401 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000402 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
403
404 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500405 } else {
406 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
407
408 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000409 }
410}
411
/* Copy the v0 (BE2) HW stats layout into the chip-agnostic drv_stats.
 * The FW returns the stats in little-endian; they are byte-swapped in
 * place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address-filter and vlan-filter drops separately;
	 * fold them into the single drv counter
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
460
/* Copy the v1 (BE3) HW stats layout into the chip-agnostic drv_stats.
 * The FW returns the stats in little-endian; they are byte-swapped in
 * place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filter-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
506
/* Copy the v2 (Skyhawk and later) HW stats layout into the chip-agnostic
 * drv_stats.  Identical to v1 plus RoCE counters, which are only filled
 * in when the function supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		/* RoCE counters only exist in the v2 layout */
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
560
/* Copy Lancer's pport stats layout into the chip-agnostic drv_stats.
 * Lancer reports 64-bit counters split into _lo/_hi words; only the low
 * words are consumed here.  Stats are byte-swapped in place first.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* like BE2-v0, Lancer reports address and vlan filter drops
	 * separately; fold them into the single drv counter
	 */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000597
Sathya Perla09c1c682011-08-22 19:41:53 +0000598static void accumulate_16bit_val(u32 *acc, u16 val)
599{
600#define lo(x) (x & 0xFFFF)
601#define hi(x) (x & 0xFFFF0000)
602 bool wrapped = val < lo(*acc);
603 u32 newacc = hi(*acc) + val;
604
605 if (wrapped)
606 newacc += 65536;
607 ACCESS_ONCE(*acc) = newacc;
608}
609
Jingoo Han4188e7d2013-08-05 18:02:02 +0900610static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530611 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000612{
613 if (!BEx_chip(adapter))
614 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
615 else
616 /* below erx HW counter can actually wrap around after
617 * 65535. Driver accumulates a 32-bit value
618 */
619 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
620 (u16)erx_stat);
621}
622
/* Convert the FW/HW statistics - whose layout differs per ASIC generation
 * (Lancer vs BE2 vs BE3 vs later) - into the driver's common
 * be_drv_stats/rx_stats representation.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		/* pick the stats layout matching the ASIC generation */
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
648
/* ndo_get_stats64 handler: aggregate the per-RX/TX-queue software counters
 * and the FW-derived drv_stats into the standard rtnl_link_stats64 layout.
 * Returns the stats pointer passed in.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* seqcount retry loop: re-sample if a writer updated the
		 * 64-bit counters while we were reading them
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
716
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000717void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700718{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719 struct net_device *netdev = adapter->netdev;
720
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000721 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000722 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000723 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700724 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000725
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530726 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000727 netif_carrier_on(netdev);
728 else
729 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200730
731 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732}
733
Sriharsha Basavapatnaf3d6ad82016-10-09 09:58:52 +0530734static int be_gso_hdr_len(struct sk_buff *skb)
735{
736 if (skb->encapsulation)
737 return skb_inner_transport_offset(skb) +
738 inner_tcp_hdrlen(skb);
739 return skb_transport_offset(skb) + tcp_hdrlen(skb);
740}
741
/* Update the per-TX-queue software stats for an skb being queued for
 * transmit. All counter updates are fenced by u64_stats_update_begin/end
 * so readers (be_get_stats64) see a consistent snapshot.
 */
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	/* non-GSO skbs count as a single packet */
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}
757
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500758/* Returns number of WRBs needed for the skb */
759static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700760{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500761 /* +1 for the header wrb */
762 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763}
764
765static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
766{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500767 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
768 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
769 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
770 wrb->rsvd0 = 0;
771}
772
773/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
774 * to avoid the swap and shift/mask operations in wrb_fill().
775 */
776static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
777{
778 wrb->frag_pa_hi = 0;
779 wrb->frag_pa_lo = 0;
780 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000781 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700782}
783
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000784static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530785 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000786{
787 u8 vlan_prio;
788 u16 vlan_tag;
789
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100790 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000791 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
792 /* If vlan priority provided by OS is NOT in available bmap */
793 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
794 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500795 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000796
797 return vlan_tag;
798}
799
Sathya Perlac9c47142014-03-27 10:46:19 +0530800/* Used only for IP tunnel packets */
801static u16 skb_inner_ip_proto(struct sk_buff *skb)
802{
803 return (inner_ip_hdr(skb)->version == 4) ?
804 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
805}
806
807static u16 skb_ip_proto(struct sk_buff *skb)
808{
809 return (ip_hdr(skb)->version == 4) ?
810 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
811}
812
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530813static inline bool be_is_txq_full(struct be_tx_obj *txo)
814{
815 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
816}
817
818static inline bool be_can_txq_wake(struct be_tx_obj *txo)
819{
820 return atomic_read(&txo->q.used) < txo->q.len / 2;
821}
822
823static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
824{
825 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
826}
827
/* Inspect the skb and record the offloads the HW must perform (TSO,
 * IP/TCP/UDP checksum, VLAN tagging) as feature flags in wrb_params,
 * for later use when the header WRB is filled in.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* the LSO6 hint is not set on Lancer */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunnelled pkt: look at the inner L4 protocol */
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500859
/* Translate the offload feature flags collected in wrb_params into the
 * bit-fields of the TX header WRB.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum offload bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* TSO bits */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
896
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000897static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530898 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000899{
900 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500901 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000902
Sathya Perla7101e112010-03-22 20:41:12 +0000903
Sathya Perlaf986afc2015-02-06 08:18:43 -0500904 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
905 (u64)le32_to_cpu(wrb->frag_pa_lo);
906 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000907 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500908 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000909 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500910 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000911 }
912}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700913
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530914/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530915static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700916{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530917 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700918
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530919 queue_head_inc(&txo->q);
920 return head;
921}
922
/* Set up the WRB header for xmit: fill the header WRB previously reserved
 * at 'head' by be_tx_get_wrb_hdr(), remember the skb for TX completion,
 * and update the queue's bookkeeping counters.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* WRB contents are handed to the HW in little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	/* remembered so TX completion can free the skb */
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700943
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530944/* Setup a WRB fragment (buffer descriptor) for xmit */
945static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
946 int len)
947{
948 struct be_eth_wrb *wrb;
949 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700950
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530951 wrb = queue_head_node(txq);
952 wrb_fill(wrb, busaddr, len);
953 queue_head_inc(txq);
954}
955
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the failed packet's hdr wrb so we can walk its frags */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* 'copied' is the total mapped length still to be unmapped */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first (linear) frag was dma_map_single'd */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index back at the failed packet's hdr wrb */
	txq->head = head;
}
983
984/* Enqueue the given packet for transmit. This routine allocates WRBs for the
985 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
986 * of WRBs used up by the packet.
987 */
988static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
989 struct sk_buff *skb,
990 struct be_wrb_params *wrb_params)
991{
992 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
993 struct device *dev = &adapter->pdev->dev;
994 struct be_queue_info *txq = &txo->q;
995 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530996 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530997 dma_addr_t busaddr;
998 int len;
999
1000 head = be_tx_get_wrb_hdr(txo);
1001
1002 if (skb->len > skb->data_len) {
1003 len = skb_headlen(skb);
1004
1005 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
1006 if (dma_mapping_error(dev, busaddr))
1007 goto dma_err;
1008 map_single = true;
1009 be_tx_setup_wrb_frag(txo, busaddr, len);
1010 copied += len;
1011 }
1012
1013 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1014 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1015 len = skb_frag_size(frag);
1016
1017 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1018 if (dma_mapping_error(dev, busaddr))
1019 goto dma_err;
1020 be_tx_setup_wrb_frag(txo, busaddr, len);
1021 copied += len;
1022 }
1023
1024 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1025
1026 be_tx_stats_update(txo, skb);
1027 return wrb_cnt;
1028
1029dma_err:
1030 adapter->drv_stats.dma_map_errors++;
1031 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +00001032 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001033}
1034
/* Non-zero once the QnQ async event flag has been set in adapter->flags;
 * tested by the VLAN/QnQ workaround paths below.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
1039
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself instead of relying on HW tag insertion, setting the
 * VLAN_SKIP_HW feature where the FW workaround applies. May re-allocate
 * the skb; returns NULL if that fails.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* the skb may be modified below; make sure we own it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* the tag now lives in the frame data; clear the meta field */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1083
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001084static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1085{
1086 struct ethhdr *eh = (struct ethhdr *)skb->data;
1087 u16 offset = ETH_HLEN;
1088
1089 if (eh->h_proto == htons(ETH_P_IPV6)) {
1090 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1091
1092 offset += sizeof(struct ipv6hdr);
1093 if (ip6h->nexthdr != NEXTHDR_TCP &&
1094 ip6h->nexthdr != NEXTHDR_UDP) {
1095 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301096 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001097
1098 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1099 if (ehdr->hdrlen == 0xff)
1100 return true;
1101 }
1102 }
1103 return false;
1104}
1105
1106static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1107{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001108 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001109}
1110
Sathya Perla748b5392014-05-09 13:29:13 +05301111static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001112{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001113 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001114}
1115
/* Apply the chip-specific (Lancer/BEx) HW-bug workarounds to a packet
 * before it is handed to the HW. May modify or re-allocate the skb.
 * Returns NULL if the packet must be dropped (the skb is freed where
 * appropriate).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		/* trim the padding off; the HW/FW re-pads correctly */
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1184
/* Top-level TX fixups: pad runt packets, apply the BEx/Lancer-specific
 * workarounds, and trim packets longer than the HW supports.
 * Returns NULL if the packet could not be fixed up and must be dropped.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		/* on failure the skb is consumed by skb_put_padto() */
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1216
/* Notify the HW of all pending WRBs of this queue. Ensures the last
 * request raises a completion event and, on non-Lancer chips, pads an
 * odd WRB count to an even one with a dummy WRB before ringing the
 * doorbell.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* fold the dummy wrb into the last request's num_wrb field */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1240
/* OS2BMC related */

/* well-known UDP ports whose traffic may have to be mirrored to the BMC */
#define DHCP_CLIENT_PORT 68
#define DHCP_SERVER_PORT 67
#define NET_BIOS_PORT1 137
#define NET_BIOS_PORT2 138
#define DHCPV6_RAS_PORT 547

/* The is_*_allowed_on_bmc() / is_*_filt_enabled() helpers below test the
 * per-class filter bits in adapter->bmc_filt_mask to decide which frames
 * must also be forwarded to the BMC (see be_send_pkt_to_bmc()).
 */
#define is_mc_allowed_on_bmc(adapter, eh) \
	(!is_multicast_filt_enabled(adapter) && \
	 is_multicast_ether_addr(eh->h_dest) && \
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh) \
	(!is_broadcast_filt_enabled(adapter) && \
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb) \
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter) \
	(is_multicast_ether_addr(eh->h_dest) && \
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & \
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1294
1295static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1296 struct sk_buff **skb)
1297{
1298 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1299 bool os2bmc = false;
1300
1301 if (!be_is_os2bmc_enabled(adapter))
1302 goto done;
1303
1304 if (!is_multicast_ether_addr(eh->h_dest))
1305 goto done;
1306
1307 if (is_mc_allowed_on_bmc(adapter, eh) ||
1308 is_bc_allowed_on_bmc(adapter, eh) ||
1309 is_arp_allowed_on_bmc(adapter, (*skb))) {
1310 os2bmc = true;
1311 goto done;
1312 }
1313
1314 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1315 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1316 u8 nexthdr = hdr->nexthdr;
1317
1318 if (nexthdr == IPPROTO_ICMPV6) {
1319 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1320
1321 switch (icmp6->icmp6_type) {
1322 case NDISC_ROUTER_ADVERTISEMENT:
1323 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1324 goto done;
1325 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1326 os2bmc = is_ipv6_na_filt_enabled(adapter);
1327 goto done;
1328 default:
1329 break;
1330 }
1331 }
1332 }
1333
1334 if (is_udp_pkt((*skb))) {
1335 struct udphdr *udp = udp_hdr((*skb));
1336
Venkat Duvvuru1645d992015-07-10 05:32:47 -04001337 switch (ntohs(udp->dest)) {
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301338 case DHCP_CLIENT_PORT:
1339 os2bmc = is_dhcp_client_filt_enabled(adapter);
1340 goto done;
1341 case DHCP_SERVER_PORT:
1342 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1343 goto done;
1344 case NET_BIOS_PORT1:
1345 case NET_BIOS_PORT2:
1346 os2bmc = is_nbios_filt_enabled(adapter);
1347 goto done;
1348 case DHCPV6_RAS_PORT:
1349 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1350 goto done;
1351 default:
1352 break;
1353 }
1354 }
1355done:
1356 /* For packets over a vlan, which are destined
1357 * to BMC, asic expects the vlan to be inline in the packet.
1358 */
1359 if (os2bmc)
1360 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1361
1362 return os2bmc;
1363}
1364
Sathya Perlaee9c7992013-05-22 23:04:55 +00001365static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1366{
1367 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001368 u16 q_idx = skb_get_queue_mapping(skb);
1369 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301370 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301371 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001372 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001373
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301374 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001375 if (unlikely(!skb))
1376 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001377
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301378 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1379
1380 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001381 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001382 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001383 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001385
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301386 /* if os2bmc is enabled and if the pkt is destined to bmc,
1387 * enqueue the pkt a 2nd time with mgmt bit set.
1388 */
1389 if (be_send_pkt_to_bmc(adapter, &skb)) {
1390 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1391 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1392 if (unlikely(!wrb_cnt))
1393 goto drop;
1394 else
1395 skb_get(skb);
1396 }
1397
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301398 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001399 netif_stop_subqueue(netdev, q_idx);
1400 tx_stats(txo)->tx_stops++;
1401 }
1402
1403 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1404 be_xmit_flush(adapter, txo);
1405
1406 return NETDEV_TX_OK;
1407drop:
1408 tx_stats(txo)->tx_drv_drops++;
1409 /* Flush the already enqueued tx requests */
1410 if (flush && txo->pend_wrb_cnt)
1411 be_xmit_flush(adapter, txo);
1412
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413 return NETDEV_TX_OK;
1414}
1415
1416static int be_change_mtu(struct net_device *netdev, int new_mtu)
1417{
1418 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301419 struct device *dev = &adapter->pdev->dev;
1420
1421 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1422 dev_info(dev, "MTU must be between %d and %d bytes\n",
1423 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001424 return -EINVAL;
1425 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301426
1427 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301428 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001429 netdev->mtu = new_mtu;
1430 return 0;
1431}
1432
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001433static inline bool be_in_all_promisc(struct be_adapter *adapter)
1434{
1435 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1436 BE_IF_FLAGS_ALL_PROMISCUOUS;
1437}
1438
1439static int be_set_vlan_promisc(struct be_adapter *adapter)
1440{
1441 struct device *dev = &adapter->pdev->dev;
1442 int status;
1443
1444 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1445 return 0;
1446
1447 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1448 if (!status) {
1449 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1450 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1451 } else {
1452 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1453 }
1454 return status;
1455}
1456
1457static int be_clear_vlan_promisc(struct be_adapter *adapter)
1458{
1459 struct device *dev = &adapter->pdev->dev;
1460 int status;
1461
1462 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1463 if (!status) {
1464 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1465 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1466 }
1467 return status;
1468}
1469
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001471 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1472 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473 */
Sathya Perla10329df2012-06-05 19:37:18 +00001474static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001475{
Vasundhara Volam50762662014-09-12 17:39:14 +05301476 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001477 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301478 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001479 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001480
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001481 /* No need to change the VLAN state if the I/F is in promiscuous */
1482 if (adapter->netdev->flags & IFF_PROMISC)
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001483 return 0;
1484
Sathya Perla92bf14a2013-08-27 16:57:32 +05301485 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001486 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001487
Somnath Kotur841f60f2016-07-27 05:26:15 -04001488 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1489 status = be_clear_vlan_promisc(adapter);
1490 if (status)
1491 return status;
1492 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001493 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301494 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1495 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001496
Vasundhara Volam435452a2015-03-20 06:28:23 -04001497 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001498 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001499 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001500 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP77be8c12015-05-06 05:30:35 -04001501 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1502 addl_status(status) ==
Kalesh AP4c600052014-05-30 19:06:26 +05301503 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001504 return be_set_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001505 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001506 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001507}
1508
Patrick McHardy80d5c362013-04-19 02:04:28 +00001509static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001510{
1511 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001512 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513
Sathya Perlab7172412016-07-27 05:26:18 -04001514 mutex_lock(&adapter->rx_filter_lock);
1515
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001516 /* Packets with VID 0 are always received by Lancer by default */
1517 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001518 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301519
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301520 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001521 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001522
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301523 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301524 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001525
Sathya Perlab7172412016-07-27 05:26:18 -04001526 status = be_vid_config(adapter);
1527done:
1528 mutex_unlock(&adapter->rx_filter_lock);
1529 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001530}
1531
Patrick McHardy80d5c362013-04-19 02:04:28 +00001532static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001533{
1534 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001535 int status = 0;
1536
1537 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001538
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001539 /* Packets with VID 0 are always received by Lancer by default */
1540 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001541 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001542
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301543 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001544 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301545
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301546 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301547 adapter->vlans_added--;
1548
Sathya Perlab7172412016-07-27 05:26:18 -04001549 status = be_vid_config(adapter);
1550done:
1551 mutex_unlock(&adapter->rx_filter_lock);
1552 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553}
1554
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001555static void be_set_all_promisc(struct be_adapter *adapter)
1556{
1557 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1558 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1559}
1560
1561static void be_set_mc_promisc(struct be_adapter *adapter)
1562{
1563 int status;
1564
1565 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1566 return;
1567
1568 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1569 if (!status)
1570 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1571}
1572
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001573static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001574{
1575 int status;
1576
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001577 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1578 return;
1579
1580 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001581 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001582 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1583}
1584
1585static void be_clear_uc_promisc(struct be_adapter *adapter)
1586{
1587 int status;
1588
1589 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1590 return;
1591
1592 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1593 if (!status)
1594 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1595}
1596
/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
 * We use a single callback function for both sync and unsync. We really don't
 * add/remove addresses through this callback. But, we use it to detect changes
 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
 */
static int be_uc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Only flag the change; @addr itself is not recorded here */
	adapter->update_uc_list = true;
	return 0;
}
1610
/* __dev_mc_sync()/unsync() callback: flags that the netdev mc list changed.
 * The list itself is re-programmed later from be_set_mc_list().
 */
static int be_mc_list_update(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Only flag the change; @addr itself is not recorded here */
	adapter->update_mc_list = true;
	return 0;
}
1619
/* Re-programs the HW multicast filter from the netdev's mc list.
 * The mc addresses are cached into adapter->mc_list while holding the
 * netdev addr-list lock; the FW commands are issued only after the lock is
 * dropped. Falls back to mc-promiscuous mode when IFF_ALLMULTI is set, the
 * list exceeds be_max_mc(), or programming the filter fails.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	/* be_mc_list_update() sets adapter->update_mc_list on any change */
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		/* In full promisc mode the mc filter doesn't matter */
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			/* Filter programming failed: stay reachable by
			 * falling back to mc-promisc.
			 */
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1670
/* Tear down multicast filtering state: drop the net core's mc sync state,
 * turn the HW multicast filter off and forget the cached address count.
 */
static void be_clear_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* NULL unsync callback: nothing per-address to undo */
	__dev_mc_unsync(netdev, NULL);
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
	adapter->mc_count = 0;
}
1679
Suresh Reddy988d44b2016-09-07 19:57:52 +05301680static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1681{
1682 if (ether_addr_equal((u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
Suresh Reddyc27ebf52016-09-07 19:57:53 +05301683 adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301684 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1685 return 0;
1686 }
1687
1688 return be_cmd_pmac_add(adapter,
1689 (u8 *)&adapter->uc_list[uc_idx * ETH_ALEN],
1690 adapter->if_handle,
1691 &adapter->pmac_id[uc_idx + 1], 0);
1692}
1693
1694static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1695{
1696 if (pmac_id == adapter->pmac_id[0])
1697 return;
1698
1699 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1700}
1701
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001702static void be_set_uc_list(struct be_adapter *adapter)
1703{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001704 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001705 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001706 bool uc_promisc = false;
Sathya Perlab7172412016-07-27 05:26:18 -04001707 int curr_uc_macs = 0, i;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001708
Sathya Perlab7172412016-07-27 05:26:18 -04001709 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001710 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001711
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001712 if (netdev->flags & IFF_PROMISC) {
1713 adapter->update_uc_list = false;
1714 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1715 uc_promisc = true;
1716 adapter->update_uc_list = false;
1717 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1718 /* Update uc-list unconditionally if the iface was previously
1719 * in uc-promisc mode and now is out of that mode.
1720 */
1721 adapter->update_uc_list = true;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001722 }
1723
Sathya Perlab7172412016-07-27 05:26:18 -04001724 if (adapter->update_uc_list) {
1725 i = 1; /* First slot is claimed by the Primary MAC */
1726
1727 /* cache the uc-list in adapter array */
1728 netdev_for_each_uc_addr(ha, netdev) {
1729 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1730 i++;
1731 }
1732 curr_uc_macs = netdev_uc_count(netdev);
1733 }
1734 netif_addr_unlock_bh(netdev);
1735
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001736 if (uc_promisc) {
1737 be_set_uc_promisc(adapter);
1738 } else if (adapter->update_uc_list) {
1739 be_clear_uc_promisc(adapter);
1740
Sathya Perlab7172412016-07-27 05:26:18 -04001741 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301742 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001743
Sathya Perlab7172412016-07-27 05:26:18 -04001744 for (i = 0; i < curr_uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301745 be_uc_mac_add(adapter, i);
Sathya Perlab7172412016-07-27 05:26:18 -04001746 adapter->uc_macs = curr_uc_macs;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001747 adapter->update_uc_list = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001748 }
1749}
1750
1751static void be_clear_uc_list(struct be_adapter *adapter)
1752{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001753 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001754 int i;
1755
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001756 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001757 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301758 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1759
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001760 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301761}
1762
Sathya Perlab7172412016-07-27 05:26:18 -04001763static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001764{
Sathya Perlab7172412016-07-27 05:26:18 -04001765 struct net_device *netdev = adapter->netdev;
1766
1767 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001768
1769 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001770 if (!be_in_all_promisc(adapter))
1771 be_set_all_promisc(adapter);
1772 } else if (be_in_all_promisc(adapter)) {
1773 /* We need to re-program the vlan-list or clear
1774 * vlan-promisc mode (if needed) when the interface
1775 * comes out of promisc mode.
1776 */
1777 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001778 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001779
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001780 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001781 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001782
1783 mutex_unlock(&adapter->rx_filter_lock);
1784}
1785
1786static void be_work_set_rx_mode(struct work_struct *work)
1787{
1788 struct be_cmd_work *cmd_work =
1789 container_of(work, struct be_cmd_work, work);
1790
1791 __be_set_rx_mode(cmd_work->adapter);
1792 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001793}
1794
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001795static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1796{
1797 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001798 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001799 int status;
1800
Sathya Perla11ac75e2011-12-13 00:58:50 +00001801 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001802 return -EPERM;
1803
Sathya Perla11ac75e2011-12-13 00:58:50 +00001804 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001805 return -EINVAL;
1806
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301807 /* Proceed further only if user provided MAC is different
1808 * from active MAC
1809 */
1810 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1811 return 0;
1812
Sathya Perla3175d8c2013-07-23 15:25:03 +05301813 if (BEx_chip(adapter)) {
1814 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1815 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001816
Sathya Perla11ac75e2011-12-13 00:58:50 +00001817 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1818 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301819 } else {
1820 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1821 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001822 }
1823
Kalesh APabccf232014-07-17 16:20:24 +05301824 if (status) {
1825 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1826 mac, vf, status);
1827 return be_cmd_status(status);
1828 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001829
Kalesh APabccf232014-07-17 16:20:24 +05301830 ether_addr_copy(vf_cfg->mac_addr, mac);
1831
1832 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001833}
1834
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001835static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301836 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001837{
1838 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001839 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001840
Sathya Perla11ac75e2011-12-13 00:58:50 +00001841 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001842 return -EPERM;
1843
Sathya Perla11ac75e2011-12-13 00:58:50 +00001844 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001845 return -EINVAL;
1846
1847 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001848 vi->max_tx_rate = vf_cfg->tx_rate;
1849 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001850 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1851 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001852 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301853 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001854 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001855
1856 return 0;
1857}
1858
Vasundhara Volam435452a2015-03-20 06:28:23 -04001859static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1860{
1861 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1862 u16 vids[BE_NUM_VLANS_SUPPORTED];
1863 int vf_if_id = vf_cfg->if_handle;
1864 int status;
1865
1866 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001867 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001868 if (status)
1869 return status;
1870
1871 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1872 vids[0] = 0;
1873 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1874 if (!status)
1875 dev_info(&adapter->pdev->dev,
1876 "Cleared guest VLANs on VF%d", vf);
1877
1878 /* After TVT is enabled, disallow VFs to program VLAN filters */
1879 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1880 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1881 ~BE_PRIV_FILTMGMT, vf + 1);
1882 if (!status)
1883 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1884 }
1885 return 0;
1886}
1887
1888static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1889{
1890 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1891 struct device *dev = &adapter->pdev->dev;
1892 int status;
1893
1894 /* Reset Transparent VLAN Tagging. */
1895 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
Kalesh APe7bcbd72015-05-06 05:30:32 -04001896 vf_cfg->if_handle, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001897 if (status)
1898 return status;
1899
1900 /* Allow VFs to program VLAN filtering */
1901 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1902 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1903 BE_PRIV_FILTMGMT, vf + 1);
1904 if (!status) {
1905 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1906 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1907 }
1908 }
1909
1910 dev_info(dev,
1911 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1912 return 0;
1913}
1914
Moshe Shemesh79aab092016-09-22 12:11:15 +03001915static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1916 __be16 vlan_proto)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001917{
1918 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001919 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001920 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001921
Sathya Perla11ac75e2011-12-13 00:58:50 +00001922 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001923 return -EPERM;
1924
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001925 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001926 return -EINVAL;
1927
Moshe Shemesh79aab092016-09-22 12:11:15 +03001928 if (vlan_proto != htons(ETH_P_8021Q))
1929 return -EPROTONOSUPPORT;
1930
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001931 if (vlan || qos) {
1932 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001933 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001934 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001935 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001936 }
1937
Kalesh APabccf232014-07-17 16:20:24 +05301938 if (status) {
1939 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001940 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1941 status);
Kalesh APabccf232014-07-17 16:20:24 +05301942 return be_cmd_status(status);
1943 }
1944
1945 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301946 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001947}
1948
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001949static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1950 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001951{
1952 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301953 struct device *dev = &adapter->pdev->dev;
1954 int percent_rate, status = 0;
1955 u16 link_speed = 0;
1956 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001957
Sathya Perla11ac75e2011-12-13 00:58:50 +00001958 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001959 return -EPERM;
1960
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001961 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001962 return -EINVAL;
1963
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001964 if (min_tx_rate)
1965 return -EINVAL;
1966
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301967 if (!max_tx_rate)
1968 goto config_qos;
1969
1970 status = be_cmd_link_status_query(adapter, &link_speed,
1971 &link_status, 0);
1972 if (status)
1973 goto err;
1974
1975 if (!link_status) {
1976 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301977 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301978 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001979 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001980
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301981 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1982 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1983 link_speed);
1984 status = -EINVAL;
1985 goto err;
1986 }
1987
1988 /* On Skyhawk the QOS setting must be done only as a % value */
1989 percent_rate = link_speed / 100;
1990 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1991 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1992 percent_rate);
1993 status = -EINVAL;
1994 goto err;
1995 }
1996
1997config_qos:
1998 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001999 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05302000 goto err;
2001
2002 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
2003 return 0;
2004
2005err:
2006 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
2007 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05302008 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00002009}
Kalesh APe2fb1af2014-09-19 15:46:58 +05302010
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302011static int be_set_vf_link_state(struct net_device *netdev, int vf,
2012 int link_state)
2013{
2014 struct be_adapter *adapter = netdev_priv(netdev);
2015 int status;
2016
2017 if (!sriov_enabled(adapter))
2018 return -EPERM;
2019
2020 if (vf >= adapter->num_vfs)
2021 return -EINVAL;
2022
2023 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302024 if (status) {
2025 dev_err(&adapter->pdev->dev,
2026 "Link state change on VF %d failed: %#x\n", vf, status);
2027 return be_cmd_status(status);
2028 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302029
Kalesh APabccf232014-07-17 16:20:24 +05302030 adapter->vf_cfg[vf].plink_tracking = link_state;
2031
2032 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302033}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002034
Kalesh APe7bcbd72015-05-06 05:30:32 -04002035static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2036{
2037 struct be_adapter *adapter = netdev_priv(netdev);
2038 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2039 u8 spoofchk;
2040 int status;
2041
2042 if (!sriov_enabled(adapter))
2043 return -EPERM;
2044
2045 if (vf >= adapter->num_vfs)
2046 return -EINVAL;
2047
2048 if (BEx_chip(adapter))
2049 return -EOPNOTSUPP;
2050
2051 if (enable == vf_cfg->spoofchk)
2052 return 0;
2053
2054 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2055
2056 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2057 0, spoofchk);
2058 if (status) {
2059 dev_err(&adapter->pdev->dev,
2060 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2061 return be_cmd_status(status);
2062 }
2063
2064 vf_cfg->spoofchk = enable;
2065 return 0;
2066}
2067
Sathya Perla2632baf2013-10-01 16:00:00 +05302068static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2069 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002070{
Sathya Perla2632baf2013-10-01 16:00:00 +05302071 aic->rx_pkts_prev = rx_pkts;
2072 aic->tx_reqs_prev = tx_pkts;
2073 aic->jiffies = now;
2074}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002075
/* Compute a new event-queue delay value for @eqo based on the rx/tx
 * packet rate observed since the previous call. Returns the static
 * (ethtool-configured) value when adaptive coalescing is disabled.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* AIC off: drop any stale baseline and use the static eqd */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum packet counts over all rx/tx queues serviced by this EQ;
	 * the u64_stats retry loops give a torn-free 64-bit read even
	 * on 32-bit hosts.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		/* Reset the baseline; keep reporting the previous delay */
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Interval too short to measure a rate; keep the previous value */
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Very low rates get no coalescing; otherwise clamp to the
	 * configured [min_eqd, max_eqd] window.
	 */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
2136
2137/* For Skyhawk-R only */
2138static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2139{
2140 struct be_adapter *adapter = eqo->adapter;
2141 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2142 ulong now = jiffies;
2143 int eqd;
2144 u32 mult_enc;
2145
2146 if (!aic->enable)
2147 return 0;
2148
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302149 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002150 eqd = aic->prev_eqd;
2151 else
2152 eqd = be_get_new_eqd(eqo);
2153
2154 if (eqd > 100)
2155 mult_enc = R2I_DLY_ENC_1;
2156 else if (eqd > 60)
2157 mult_enc = R2I_DLY_ENC_2;
2158 else if (eqd > 20)
2159 mult_enc = R2I_DLY_ENC_3;
2160 else
2161 mult_enc = R2I_DLY_ENC_0;
2162
2163 aic->prev_eqd = eqd;
2164
2165 return mult_enc;
2166}
2167
2168void be_eqd_update(struct be_adapter *adapter, bool force_update)
2169{
2170 struct be_set_eqd set_eqd[MAX_EVT_QS];
2171 struct be_aic_obj *aic;
2172 struct be_eq_obj *eqo;
2173 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002174
Sathya Perla2632baf2013-10-01 16:00:00 +05302175 for_all_evt_queues(adapter, eqo, i) {
2176 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002177 eqd = be_get_new_eqd(eqo);
2178 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302179 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2180 set_eqd[num].eq_id = eqo->q.id;
2181 aic->prev_eqd = eqd;
2182 num++;
2183 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002184 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302185
2186 if (num)
2187 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002188}
2189
Sathya Perla3abcded2010-10-03 22:12:27 -07002190static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302191 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002192{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002193 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002194
Sathya Perlaab1594e2011-07-25 19:10:15 +00002195 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002196 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002197 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002198 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302199 if (rxcp->tunneled)
2200 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002201 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002202 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002203 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002204 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002205 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002206}
2207
Sathya Perla2e588f82011-03-11 02:49:26 +00002208static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002209{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002210 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302211 * Also ignore ipcksm for ipv6 pkts
2212 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002213 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302214 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002215}
2216
/* Consume one RX buffer descriptor from the tail of @rxo's queue and
 * return its page-info entry with the data made CPU-accessible.
 * The caller owns the page reference held in the returned entry.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag carved from this big page: the whole page
		 * mapping can be torn down now.
		 */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Other frags of the page are still posted: sync only
		 * this fragment for CPU access and keep the mapping.
		 */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2242
2243/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002244static void be_rx_compl_discard(struct be_rx_obj *rxo,
2245 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002246{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002248 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002250 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302251 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002252 put_page(page_info->page);
2253 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254 }
2255}
2256
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is either copied whole into the linear area (tiny
 * packets) or split: the ethernet header goes linear and the payload
 * stays in the page as frag 0. Remaining fragments are attached as page
 * frags, with frags from the same physical page coalesced into one slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Only the ethernet header is copied linear; the rest of
		 * the first fragment is referenced as page frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if kept) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * reference and just grow the existing slot.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2331
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * build an skb for the frame, fill in offload metadata (csum, RSS hash,
 * VLAN tag) and hand it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No memory for an skb: count the drop and recycle the
		 * frame's RX buffers.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when the device advertises RXCSUM
	 * and the completion bits say it is valid for this frame.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2367
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the frame's page fragments to a napi-provided skb and feed it
 * into the GRO engine via napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame and recycle buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16, so "j = -1" wraps to 0xffff; the first iteration's
	 * j++ (i == 0 always takes the fresh-page branch) makes it 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2424
/* Decode a v1 (BE3-native) RX completion descriptor into the
 * chip-independent be_rx_compl_info representation. The v1 format also
 * carries QnQ and tunneling (vxlan) indications.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		/* VLAN fields are meaningful only when vtp is set */
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002447
/* Decode a v0 (legacy) RX completion descriptor into be_rx_compl_info.
 * Unlike v1, this format reports an ip_frag bit and has no tunneling
 * indication.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		/* VLAN fields are meaningful only when vtp is set */
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2469
/* Fetch and parse the next valid RX completion from @rxo's completion
 * queue. Returns NULL when no completion is pending; otherwise returns
 * the per-rxo decoded completion (rxo->rxcp), with VLAN info sanitized
 * for QnQ/pvid cases, and consumes the CQ entry.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the descriptor only after the valid bit has
	 * been observed set (DMA'd by HW).
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 csum is not computed by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Lancer reports the tag in host order already */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless the vid was
		 * explicitly configured on this interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2514
Eric Dumazet1829b082011-03-01 05:48:12 +00002515static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002516{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002517 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002518
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002519 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002520 gfp |= __GFP_COMP;
2521 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002522}
2523
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Up to @frags_needed descriptors are posted, stopping early on
 * allocation/mapping failure or when the next page_info slot is still
 * in use. Each big page is DMA-mapped once and its fragments share the
 * mapping; the entry covering the page's last fragment records the page
 * base address for the eventual dma_unmap_page().
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve another fragment from the current page;
			 * each posted frag holds its own page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Write the frag's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks bounded by the max the HW
		 * accepts per notification.
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2606
/* Pop the next valid TX completion from @txo's completion queue.
 * Returns NULL when no completion is pending; otherwise decodes status
 * and the last WRB index into txo->txcp and consumes the CQ entry.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry is not processed twice */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2627
/* Reclaim TX descriptors covered by a completion: walk the TX queue
 * from its tail up to and including @last_index, unmapping each WRB's
 * DMA buffer and freeing the skb(s) recorded in sent_skb_list.
 * Returns the number of WRBs reclaimed.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		/* A non-NULL sent_skbs[] entry marks the hdr WRB of a
		 * new request.
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data WRB after the hdr maps the skb's linear
		 * area (if any); later WRBs map page frags.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2662
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002663/* Return the number of events in the event queue */
2664static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002665{
2666 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002667 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002668
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002669 do {
2670 eqe = queue_tail_node(&eqo->q);
2671 if (eqe->evt == 0)
2672 break;
2673
2674 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002675 eqe->evt = 0;
2676 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002677 queue_tail_inc(&eqo->q);
2678 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002679
2680 return num;
2681}
2682
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002683/* Leaves the EQ is disarmed state */
2684static void be_eq_clean(struct be_eq_obj *eqo)
2685{
2686 int num = events_get(eqo);
2687
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002688 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002689}
2690
Kalesh AP99b44302015-08-05 03:27:49 -04002691/* Free posted rx buffers that were not used */
2692static void be_rxq_clean(struct be_rx_obj *rxo)
2693{
2694 struct be_queue_info *rxq = &rxo->q;
2695 struct be_rx_page_info *page_info;
2696
2697 while (atomic_read(&rxq->used) > 0) {
2698 page_info = get_rx_page_info(rxo);
2699 put_page(page_info->page);
2700 memset(page_info, 0, sizeof(*page_info));
2701 }
2702 BUG_ON(atomic_read(&rxq->used));
2703 rxq->tail = 0;
2704 rxq->head = 0;
2705}
2706
/* Drain the RX completion queue during queue teardown.
 * Discards all pending completions and waits for the HW flush
 * completion before returning; leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* zero num_rcvd identifies the flush compl */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2746
/* Reap all outstanding TX completions across every TXQ during teardown,
 * then free any wrbs that were enqueued but never notified to the HW
 * and rewind those TXQs' indices accordingly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW is still making progress; reset the
				 * silence timeout
				 */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2813
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002814static void be_evt_queues_destroy(struct be_adapter *adapter)
2815{
2816 struct be_eq_obj *eqo;
2817 int i;
2818
2819 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002820 if (eqo->q.created) {
2821 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002822 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302823 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002824 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002825 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002826 be_queue_free(adapter, &eqo->q);
2827 }
2828}
2829
/* Allocate and create all event queues, along with their adaptive
 * interrupt-coalescing state, CPU affinity masks and NAPI contexts.
 * Returns 0 on success or a negative errno; on failure, partially
 * created EQs are presumably cleaned up by the caller via
 * be_evt_queues_destroy() -- TODO confirm against callers.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* spread EQ affinity across CPUs local to the device */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2870
Sathya Perla5fb379e2009-06-18 00:02:59 +00002871static void be_mcc_queues_destroy(struct be_adapter *adapter)
2872{
2873 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002874
Sathya Perla8788fdc2009-07-27 22:52:03 +00002875 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002876 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002877 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002878 be_queue_free(adapter, q);
2879
Sathya Perla8788fdc2009-07-27 22:52:03 +00002880 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002881 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002882 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002883 be_queue_free(adapter, q);
2884}
2885
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* create the MCC completion queue first; the MCC queue needs it */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2918
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002919static void be_tx_queues_destroy(struct be_adapter *adapter)
2920{
2921 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002922 struct be_tx_obj *txo;
2923 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002924
Sathya Perla3c8def92011-06-12 20:01:58 +00002925 for_all_tx_queues(adapter, txo, i) {
2926 q = &txo->q;
2927 if (q->created)
2928 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2929 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002930
Sathya Perla3c8def92011-06-12 20:01:58 +00002931 q = &txo->cq;
2932 if (q->created)
2933 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2934 be_queue_free(adapter, q);
2935 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002936}
2937
/* Create all TX queues: for each, allocate and create the completion
 * queue (bound to an EQ), allocate and create the TXQ itself, and
 * record the XPS mapping. Must be called after the EQs exist.
 * Returns 0 or the first failing command's status.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* steer transmits from CPUs in this EQ's affinity mask
		 * to this TXQ
		 */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2982
2983static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002984{
2985 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002986 struct be_rx_obj *rxo;
2987 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002988
Sathya Perla3abcded2010-10-03 22:12:27 -07002989 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002990 q = &rxo->cq;
2991 if (q->created)
2992 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2993 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002994 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002995}
2996
/* Decide the number of RX queues (RSS rings plus an optional default
 * RXQ, with a minimum of one) and create a completion queue for each,
 * distributing them across the available EQs round-robin.
 * Returns 0 or the first failing command's status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* spread RX CQs across EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
3038
/* INTx interrupt handler: counts pending events, schedules NAPI, and
 * tracks spurious interrupts so only the first one after a valid
 * interrupt is reported as handled.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
3070
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003071static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003072{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003073 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003074
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003075 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00003076 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003077 return IRQ_HANDLED;
3078}
3079
Sathya Perla2e588f82011-03-11 02:49:26 +00003080static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003081{
Somnath Koture38b1702013-05-29 22:55:56 +00003082 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003083}
3084
/* Process up to @budget RX completions from rxo's CQ in NAPI or
 * busy-poll context, handing eligible packets to GRO and discarding
 * flush/partial/mis-filtered completions. Notifies the CQ for the work
 * done and opportunistically replenishes RX buffers.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3144
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303145static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303146{
3147 switch (status) {
3148 case BE_TX_COMP_HDR_PARSE_ERR:
3149 tx_stats(txo)->tx_hdr_parse_err++;
3150 break;
3151 case BE_TX_COMP_NDMA_ERR:
3152 tx_stats(txo)->tx_dma_err++;
3153 break;
3154 case BE_TX_COMP_ACL_ERR:
3155 tx_stats(txo)->tx_spoof_check_err++;
3156 break;
3157 }
3158}
3159
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303160static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303161{
3162 switch (status) {
3163 case LANCER_TX_COMP_LSO_ERR:
3164 tx_stats(txo)->tx_tso_err++;
3165 break;
3166 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3167 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3168 tx_stats(txo)->tx_spoof_check_err++;
3169 break;
3170 case LANCER_TX_COMP_QINQ_ERR:
3171 tx_stats(txo)->tx_qinq_err++;
3172 break;
3173 case LANCER_TX_COMP_PARITY_ERR:
3174 tx_stats(txo)->tx_internal_parity_err++;
3175 break;
3176 case LANCER_TX_COMP_DMA_ERR:
3177 tx_stats(txo)->tx_dma_err++;
3178 break;
3179 }
3180}
3181
/* Reap all available TX completions for txo (netdev subqueue @idx):
 * free/unmap completed wrbs, record per-status error stats, notify the
 * CQ, and wake the subqueue if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* error-stat layout differs per chip family */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003216
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll/NAPI mutual exclusion for an EQ: eqo->state arbitrates
 * between the NAPI poller and busy-poll so that only one of them
 * processes the EQ's queues at a time.
 */

/* Try to claim the EQ for NAPI; returns false if busy-poll holds it */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		/* record that NAPI yielded to busy-poll */
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ claimed by be_lock_napi() */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-poll; returns false if NAPI holds it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		/* record that busy-poll yielded to NAPI */
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ claimed by be_lock_busy_poll() */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ busy-poll lock and state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until busy-poll can no longer run on this EQ */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queueus.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll, NAPI always "wins" and the rest are no-ops */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3316
/* NAPI poll handler for an EQ: reaps TX completions, processes RX on
 * each RXQ bound to this EQ (unless busy-poll holds the EQ), services
 * MCC on its EQ, and either completes NAPI and re-arms the EQ or stays
 * in polling mode. Returns the RX work done (or @budget to keep polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ; report full budget so NAPI
		 * schedules us again
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		/* re-arm the EQ now that polling is done */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3365
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) RX handler: polls up to 4 frames from the first
 * RXQ on this EQ that has work.  Returns the number of frames processed,
 * or LL_FLUSH_BUSY if the regular NAPI path currently owns the queues.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	/* NAPI holds the lock: tell the stack we are busy */
	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* small fixed budget of 4 frames per busy-poll invocation */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3387
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003388void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003389{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003390 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3391 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003392 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303393 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003394
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303395 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003396 return;
3397
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003398 if (lancer_chip(adapter)) {
3399 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3400 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303401 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003402 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303403 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003404 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303405 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303406 /* Do not log error messages if its a FW reset */
3407 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3408 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3409 dev_info(dev, "Firmware update in progress\n");
3410 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303411 dev_err(dev, "Error detected in the card\n");
3412 dev_err(dev, "ERR: sliport status 0x%x\n",
3413 sliport_status);
3414 dev_err(dev, "ERR: sliport error1 0x%x\n",
3415 sliport_err1);
3416 dev_err(dev, "ERR: sliport error2 0x%x\n",
3417 sliport_err2);
3418 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003419 }
3420 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003421 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3422 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3423 ue_lo_mask = ioread32(adapter->pcicfg +
3424 PCICFG_UE_STATUS_LOW_MASK);
3425 ue_hi_mask = ioread32(adapter->pcicfg +
3426 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003427
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003428 ue_lo = (ue_lo & ~ue_lo_mask);
3429 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003430
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303431 /* On certain platforms BE hardware can indicate spurious UEs.
3432 * Allow HW to stop working completely in case of a real UE.
3433 * Hence not setting the hw_error for UE detection.
3434 */
3435
3436 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303437 dev_err(dev, "Error detected in the adapter");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303438 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303439 be_set_error(adapter, BE_ERROR_UE);
3440
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303441 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3442 if (ue_lo & 1)
3443 dev_err(dev, "UE: %s bit set\n",
3444 ue_status_low_desc[i]);
3445 }
3446 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3447 if (ue_hi & 1)
3448 dev_err(dev, "UE: %s bit set\n",
3449 ue_status_hi_desc[i]);
3450 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303451 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003452 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003453}
3454
Sathya Perla8d56ff12009-11-22 22:02:26 +00003455static void be_msix_disable(struct be_adapter *adapter)
3456{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003457 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003458 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003459 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303460 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003461 }
3462}
3463
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003464static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003465{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003466 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003467 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003468 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003469
Sathya Perlace7faf02016-06-22 08:54:53 -04003470 /* If RoCE is supported, program the max number of vectors that
3471 * could be used for NIC and RoCE, else, just program the number
3472 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303473 */
Sathya Perlae2617682016-06-22 08:54:54 -04003474 if (be_roce_supported(adapter)) {
3475 max_roce_eqs =
3476 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3477 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3478 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3479 } else {
3480 num_vec = max(adapter->cfg_num_rx_irqs,
3481 adapter->cfg_num_tx_irqs);
3482 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003483
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003484 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003485 adapter->msix_entries[i].entry = i;
3486
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003487 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3488 MIN_MSIX_VECTORS, num_vec);
3489 if (num_vec < 0)
3490 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003491
Sathya Perla92bf14a2013-08-27 16:57:32 +05303492 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3493 adapter->num_msix_roce_vec = num_vec / 2;
3494 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3495 adapter->num_msix_roce_vec);
3496 }
3497
3498 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3499
3500 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3501 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003502 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003503
3504fail:
3505 dev_warn(dev, "MSIx enable failed\n");
3506
3507 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003508 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003509 return num_vec;
3510 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003511}
3512
/* Return the Linux IRQ number of the MSI-X slot assigned to this EQ. */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3518
3519static int be_msix_register(struct be_adapter *adapter)
3520{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003521 struct net_device *netdev = adapter->netdev;
3522 struct be_eq_obj *eqo;
3523 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003524
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003525 for_all_evt_queues(adapter, eqo, i) {
3526 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3527 vec = be_msix_vec_get(adapter, eqo);
3528 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003529 if (status)
3530 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003531
3532 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003533 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003534
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003535 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003536err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303537 for (i--; i >= 0; i--) {
3538 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003539 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303540 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003541 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303542 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003543 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003544 return status;
3545}
3546
3547static int be_irq_register(struct be_adapter *adapter)
3548{
3549 struct net_device *netdev = adapter->netdev;
3550 int status;
3551
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003552 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003553 status = be_msix_register(adapter);
3554 if (status == 0)
3555 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003556 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003557 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003558 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003559 }
3560
Sathya Perlae49cc342012-11-27 19:50:02 +00003561 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003562 netdev->irq = adapter->pdev->irq;
3563 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003564 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003565 if (status) {
3566 dev_err(&adapter->pdev->dev,
3567 "INTx request IRQ failed - err %d\n", status);
3568 return status;
3569 }
3570done:
3571 adapter->isr_registered = true;
3572 return 0;
3573}
3574
3575static void be_irq_unregister(struct be_adapter *adapter)
3576{
3577 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003578 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003579 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003580
3581 if (!adapter->isr_registered)
3582 return;
3583
3584 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003585 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003586 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003587 goto done;
3588 }
3589
3590 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003591 for_all_evt_queues(adapter, eqo, i) {
3592 vec = be_msix_vec_get(adapter, eqo);
3593 irq_set_affinity_hint(vec, NULL);
3594 free_irq(vec, eqo);
3595 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003596
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003597done:
3598 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003599}
3600
/* Destroy all RX queues and, if RSS was active, disable RSS in the FW. */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			/* Destroy the RXQ in FW, then drain its CQ and free
			 * any buffers still posted to it
			 */
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	if (rss->rss_flags) {
		/* Tell FW to turn RSS off; 128 is presumably
		 * RSS_INDIR_TABLE_LEN — confirm against be_cmd_rss_config()
		 */
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3637
/* Remove the interface's RX filters (MAC, UC/MC lists, and — on Lancer
 * only — the IFACE filter flags) ahead of interface teardown.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT))
		be_dev_mac_del(adapter, adapter->pmac_id[0]);

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW.  As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is * used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3667
/* ndo_stop handler: quiesce and tear down the datapath in strict order —
 * filters, NAPI/busy-poll, MCC, TX drain, RX queues, EQ drain, IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Make sure no IRQ handler is still running for any EQ, then
	 * drain whatever events are left on it
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3717
/* Allocate and create all RX queues in FW, program the RSS indirection
 * table/hash key when multiple RXQs exist, and post the initial RX buffers.
 * Returns 0 on success or a negative/FW error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the default (non-RSS) RXQ only when needed */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * across all RSS_INDIR_TABLE_LEN entries
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not supported on BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3788
/* Program the interface's RX filters on open: basic filter flags, the
 * primary MAC (when privileged), VLANs, and the current RX mode.
 * Returns 0 on success or a FW status code.
 */
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;
		/* Remember the MAC currently programmed in HW */
		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}
3813
/* ndo_open handler: bring up the datapath — RX queues, filters, IRQs,
 * CQ/EQ arming, NAPI/busy-poll — then start the TX queues.  On any failure
 * be_close() unwinds whatever was brought up and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	/* Re-learn any VxLAN ports from the stack (Skyhawk offload only) */
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3863
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003864static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3865{
3866 u32 addr;
3867
3868 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3869
3870 mac[5] = (u8)(addr & 0xFF);
3871 mac[4] = (u8)((addr >> 8) & 0xFF);
3872 mac[3] = (u8)((addr >> 16) & 0xFF);
3873 /* Use the OUI from the current MAC address */
3874 memcpy(mac, adapter->netdev->dev_addr, 3);
3875}
3876
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003877/*
3878 * Generate a seed MAC address from the PF MAC Address using jhash.
3879 * MAC Address for VFs are assigned incrementally starting from the seed.
3880 * These addresses are programmed in the ASIC by the PF and the VF driver
3881 * queries for the MAC address during its probe.
3882 */
Sathya Perla4c876612013-02-03 20:30:11 +00003883static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003884{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003885 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003886 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003887 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003888 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003889
3890 be_vf_eth_addr_generate(adapter, mac);
3891
Sathya Perla11ac75e2011-12-13 00:58:50 +00003892 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303893 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003894 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003895 vf_cfg->if_handle,
3896 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303897 else
3898 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3899 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003900
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003901 if (status)
3902 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303903 "Mac address assignment failed for VF %d\n",
3904 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003905 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003906 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003907
3908 mac[5] += 1;
3909 }
3910 return status;
3911}
3912
Sathya Perla4c876612013-02-03 20:30:11 +00003913static int be_vfs_mac_query(struct be_adapter *adapter)
3914{
3915 int status, vf;
3916 u8 mac[ETH_ALEN];
3917 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003918
3919 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303920 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3921 mac, vf_cfg->if_handle,
3922 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003923 if (status)
3924 return status;
3925 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3926 }
3927 return 0;
3928}
3929
/* Tear down SR-IOV state: disable SR-IOV, delete each VF's MAC and IFACE
 * in FW, restore BE3 port forwarding, and free the vf_cfg array.  If any
 * VFs are still assigned to VMs, only the bookkeeping is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses PMAC entries; newer chips use SET_MAC(NULL) */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	/* Restore pass-through forwarding mode on BE3 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3963
/* Destroy all adapter queues.  NOTE: keep this order — the event queues
 * are destroyed last, after every queue type that uses them.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3971
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303972static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003973{
Sathya Perla191eb752012-02-23 18:50:13 +00003974 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3975 cancel_delayed_work_sync(&adapter->work);
3976 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3977 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303978}
3979
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003980static void be_cancel_err_detection(struct be_adapter *adapter)
3981{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303982 struct be_error_recovery *err_rec = &adapter->error_recovery;
3983
3984 if (!be_err_recovery_workq)
3985 return;
3986
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003987 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303988 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003989 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3990 }
3991}
3992
/* Turn off VxLAN offloads: revert the IFACE to normal mode, clear the
 * VxLAN port in FW, and strip the tunnel-offload feature bits from the
 * netdev so the stack stops requesting encapsulated GSO.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
4011
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004012static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4013 struct be_resources *vft_res)
Vasundhara Volamf2858732015-03-04 00:44:33 -05004014{
4015 struct be_resources res = adapter->pool_res;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004016 u32 vf_if_cap_flags = res.vf_if_cap_flags;
4017 struct be_resources res_mod = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05004018 u16 num_vf_qs = 1;
4019
Somnath Koturde2b1e02016-06-06 07:22:10 -04004020 /* Distribute the queue resources among the PF and it's VFs */
4021 if (num_vfs) {
4022 /* Divide the rx queues evenly among the VFs and the PF, capped
4023 * at VF-EQ-count. Any remainder queues belong to the PF.
4024 */
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05304025 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4026 res.max_rss_qs / (num_vfs + 1));
Vasundhara Volamf2858732015-03-04 00:44:33 -05004027
Somnath Koturde2b1e02016-06-06 07:22:10 -04004028 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4029 * RSS Tables per port. Provide RSS on VFs, only if number of
4030 * VFs requested is less than it's PF Pool's RSS Tables limit.
Vasundhara Volamf2858732015-03-04 00:44:33 -05004031 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004032 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
Vasundhara Volamf2858732015-03-04 00:44:33 -05004033 num_vf_qs = 1;
4034 }
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004035
4036 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4037 * which are modifiable using SET_PROFILE_CONFIG cmd.
4038 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004039 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4040 RESOURCE_MODIFIABLE, 0);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004041
4042 /* If RSS IFACE capability flags are modifiable for a VF, set the
4043 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4044 * more than 1 RSSQ is available for a VF.
4045 * Otherwise, provision only 1 queue pair for VF.
4046 */
4047 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4048 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4049 if (num_vf_qs > 1) {
4050 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4051 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4052 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4053 } else {
4054 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4055 BE_IF_FLAGS_DEFQ_RSS);
4056 }
4057 } else {
4058 num_vf_qs = 1;
4059 }
4060
4061 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4062 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4063 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4064 }
4065
4066 vft_res->vf_if_cap_flags = vf_if_cap_flags;
4067 vft_res->max_rx_qs = num_vf_qs;
4068 vft_res->max_rss_qs = num_vf_qs;
4069 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4070 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4071
4072 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4073 * among the PF and it's VFs, if the fields are changeable
4074 */
4075 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4076 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4077
4078 if (res_mod.max_vlans == FIELD_MODIFIABLE)
4079 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4080
4081 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4082 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4083
4084 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4085 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004086}
4087
Sathya Perlab7172412016-07-27 05:26:18 -04004088static void be_if_destroy(struct be_adapter *adapter)
4089{
4090 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4091
4092 kfree(adapter->pmac_id);
4093 adapter->pmac_id = NULL;
4094
4095 kfree(adapter->mc_list);
4096 adapter->mc_list = NULL;
4097
4098 kfree(adapter->uc_list);
4099 adapter->uc_list = NULL;
4100}
4101
Somnath Koturb05004a2013-12-05 12:08:16 +05304102static int be_clear(struct be_adapter *adapter)
4103{
Vasundhara Volamf2858732015-03-04 00:44:33 -05004104 struct pci_dev *pdev = adapter->pdev;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004105 struct be_resources vft_res = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05004106
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304107 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00004108
Sathya Perlab7172412016-07-27 05:26:18 -04004109 flush_workqueue(be_wq);
4110
Sathya Perla11ac75e2011-12-13 00:58:50 +00004111 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004112 be_vf_clear(adapter);
4113
Vasundhara Volambec84e62014-06-30 13:01:32 +05304114 /* Re-configure FW to distribute resources evenly across max-supported
4115 * number of VFs, only when VFs are not already enabled.
4116 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05004117 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4118 !pci_vfs_assigned(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004119 be_calculate_vf_res(adapter,
4120 pci_sriov_get_totalvfs(pdev),
4121 &vft_res);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304122 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004123 pci_sriov_get_totalvfs(pdev),
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004124 &vft_res);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004125 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304126
Sathya Perlac9c47142014-03-27 10:46:19 +05304127 be_disable_vxlan_offloads(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004128
Sathya Perlab7172412016-07-27 05:26:18 -04004129 be_if_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004130
Sathya Perla77071332013-08-27 16:57:34 +05304131 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004132
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004133 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304134 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004135 return 0;
4136}
4137
Sathya Perla4c876612013-02-03 20:30:11 +00004138static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004139{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304140 struct be_resources res = {0};
Kalesh APbcc84142015-08-05 03:27:48 -04004141 u32 cap_flags, en_flags, vf;
Sathya Perla4c876612013-02-03 20:30:11 +00004142 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05004143 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004144
Kalesh AP0700d812015-01-20 03:51:43 -05004145 /* If a FW profile exists, then cap_flags are updated */
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004146 cap_flags = BE_VF_IF_EN_FLAGS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004147
Sathya Perla4c876612013-02-03 20:30:11 +00004148 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304149 if (!BE3_chip(adapter)) {
Somnath Koturde2b1e02016-06-06 07:22:10 -04004150 status = be_cmd_get_profile_config(adapter, &res, NULL,
4151 ACTIVE_PROFILE_TYPE,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004152 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05304153 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004154 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304155 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04004156 /* Prevent VFs from enabling VLAN promiscuous
4157 * mode
4158 */
4159 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4160 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304161 }
Sathya Perla4c876612013-02-03 20:30:11 +00004162
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004163 /* PF should enable IF flags during proxy if_create call */
4164 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
Kalesh APbcc84142015-08-05 03:27:48 -04004165 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4166 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00004167 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05004168 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00004169 }
Kalesh AP0700d812015-01-20 03:51:43 -05004170
4171 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004172}
4173
Sathya Perla39f1d942012-05-08 19:41:24 +00004174static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004175{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004176 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004177 int vf;
4178
Sathya Perla39f1d942012-05-08 19:41:24 +00004179 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4180 GFP_KERNEL);
4181 if (!adapter->vf_cfg)
4182 return -ENOMEM;
4183
Sathya Perla11ac75e2011-12-13 00:58:50 +00004184 for_all_vfs(adapter, vf_cfg, vf) {
4185 vf_cfg->if_handle = -1;
4186 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004187 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004188 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004189}
4190
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004191static int be_vf_setup(struct be_adapter *adapter)
4192{
Sathya Perla4c876612013-02-03 20:30:11 +00004193 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05304194 struct be_vf_cfg *vf_cfg;
4195 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04004196 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004197
Sathya Perla257a3fe2013-06-14 15:54:51 +05304198 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00004199
4200 status = be_vf_setup_init(adapter);
4201 if (status)
4202 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00004203
Sathya Perla4c876612013-02-03 20:30:11 +00004204 if (old_vfs) {
4205 for_all_vfs(adapter, vf_cfg, vf) {
4206 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4207 if (status)
4208 goto err;
4209 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004210
Sathya Perla4c876612013-02-03 20:30:11 +00004211 status = be_vfs_mac_query(adapter);
4212 if (status)
4213 goto err;
4214 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05304215 status = be_vfs_if_create(adapter);
4216 if (status)
4217 goto err;
4218
Sathya Perla39f1d942012-05-08 19:41:24 +00004219 status = be_vf_eth_addr_config(adapter);
4220 if (status)
4221 goto err;
4222 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004223
Sathya Perla11ac75e2011-12-13 00:58:50 +00004224 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05304225 /* Allow VFs to programs MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04004226 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4227 vf + 1);
4228 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05304229 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04004230 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05304231 BE_PRIV_FILTMGMT,
4232 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004233 if (!status) {
4234 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05304235 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4236 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004237 }
Sathya Perla04a06022013-07-23 15:25:00 +05304238 }
4239
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304240 /* Allow full available bandwidth */
4241 if (!old_vfs)
4242 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00004243
Kalesh APe7bcbd72015-05-06 05:30:32 -04004244 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4245 vf_cfg->if_handle, NULL,
4246 &spoofchk);
4247 if (!status)
4248 vf_cfg->spoofchk = spoofchk;
4249
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304250 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05304251 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304252 be_cmd_set_logical_link_config(adapter,
4253 IFLA_VF_LINK_STATE_AUTO,
4254 vf+1);
4255 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004256 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00004257
4258 if (!old_vfs) {
4259 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4260 if (status) {
4261 dev_err(dev, "SRIOV enable failed\n");
4262 adapter->num_vfs = 0;
4263 goto err;
4264 }
4265 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304266
Somnath Kotur884476b2016-06-22 08:54:55 -04004267 if (BE3_chip(adapter)) {
4268 /* On BE3, enable VEB only when SRIOV is enabled */
4269 status = be_cmd_set_hsw_config(adapter, 0, 0,
4270 adapter->if_handle,
4271 PORT_FWD_TYPE_VEB, 0);
4272 if (status)
4273 goto err;
4274 }
4275
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304276 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004277 return 0;
4278err:
Sathya Perla4c876612013-02-03 20:30:11 +00004279 dev_err(dev, "VF setup failed\n");
4280 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004281 return status;
4282}
4283
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304284/* Converting function_mode bits on BE3 to SH mc_type enums */
4285
4286static u8 be_convert_mc_type(u32 function_mode)
4287{
Suresh Reddy66064db2014-06-23 16:41:29 +05304288 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304289 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304290 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304291 return FLEX10;
4292 else if (function_mode & VNIC_MODE)
4293 return vNIC2;
4294 else if (function_mode & UMC_ENABLED)
4295 return UMC;
4296 else
4297 return MC_NONE;
4298}
4299
Sathya Perla92bf14a2013-08-27 16:57:32 +05304300/* On BE2/BE3 FW does not suggest the supported limits */
4301static void BEx_get_resources(struct be_adapter *adapter,
4302 struct be_resources *res)
4303{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304304 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304305
4306 if (be_physfn(adapter))
4307 res->max_uc_mac = BE_UC_PMAC_COUNT;
4308 else
4309 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4310
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304311 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4312
4313 if (be_is_mc(adapter)) {
4314 /* Assuming that there are 4 channels per port,
4315 * when multi-channel is enabled
4316 */
4317 if (be_is_qnq_mode(adapter))
4318 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4319 else
4320 /* In a non-qnq multichannel mode, the pvid
4321 * takes up one vlan entry
4322 */
4323 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4324 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304325 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304326 }
4327
Sathya Perla92bf14a2013-08-27 16:57:32 +05304328 res->max_mcast_mac = BE_MAX_MC;
4329
Vasundhara Volama5243da2014-03-11 18:53:07 +05304330 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4331 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4332 * *only* if it is RSS-capable.
4333 */
4334 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004335 be_virtfn(adapter) ||
4336 (be_is_mc(adapter) &&
4337 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304338 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304339 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4340 struct be_resources super_nic_res = {0};
4341
4342 /* On a SuperNIC profile, the driver needs to use the
4343 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4344 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004345 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4346 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4347 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304348 /* Some old versions of BE3 FW don't report max_tx_qs value */
4349 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4350 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304351 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304352 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304353
4354 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4355 !use_sriov && be_physfn(adapter))
4356 res->max_rss_qs = (adapter->be3_native) ?
4357 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4358 res->max_rx_qs = res->max_rss_qs + 1;
4359
Suresh Reddye3dc8672014-01-06 13:02:25 +05304360 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304361 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304362 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4363 else
4364 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304365
4366 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004367 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304368 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4369 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4370}
4371
Sathya Perla30128032011-11-10 19:17:57 +00004372static void be_setup_init(struct be_adapter *adapter)
4373{
4374 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004375 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004376 adapter->if_handle = -1;
4377 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004378 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304379 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004380 if (be_physfn(adapter))
4381 adapter->cmd_privileges = MAX_PRIVILEGES;
4382 else
4383 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004384}
4385
Somnath Koturde2b1e02016-06-06 07:22:10 -04004386/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4387 * However, this HW limitation is not exposed to the host via any SLI cmd.
4388 * As a result, in the case of SRIOV and in particular multi-partition configs
4389 * the driver needs to calcuate a proportional share of RSS Tables per PF-pool
4390 * for distribution between the VFs. This self-imposed limit will determine the
4391 * no: of VFs for which RSS can be enabled.
4392 */
Baoyou Xied766e7e2016-09-18 16:35:29 +08004393static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
Somnath Koturde2b1e02016-06-06 07:22:10 -04004394{
4395 struct be_port_resources port_res = {0};
4396 u8 rss_tables_on_port;
4397 u16 max_vfs = be_max_vfs(adapter);
4398
4399 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4400 RESOURCE_LIMITS, 0);
4401
4402 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4403
4404 /* Each PF Pool's RSS Tables limit =
4405 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4406 */
4407 adapter->pool_res.max_rss_tables =
4408 max_vfs * rss_tables_on_port / port_res.max_vfs;
4409}
4410
Vasundhara Volambec84e62014-06-30 13:01:32 +05304411static int be_get_sriov_config(struct be_adapter *adapter)
4412{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304413 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304414 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304415
Somnath Koturde2b1e02016-06-06 07:22:10 -04004416 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4417 RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304418
Vasundhara Volamace40af2015-03-04 00:44:34 -05004419 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304420 if (BE3_chip(adapter) && !res.max_vfs) {
4421 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4422 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4423 }
4424
Sathya Perlad3d18312014-08-01 17:47:30 +05304425 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304426
Vasundhara Volamace40af2015-03-04 00:44:34 -05004427 /* If during previous unload of the driver, the VFs were not disabled,
4428 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4429 * Instead use the TotalVFs value stored in the pci-dev struct.
4430 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304431 old_vfs = pci_num_vf(adapter->pdev);
4432 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004433 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4434 old_vfs);
4435
4436 adapter->pool_res.max_vfs =
4437 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304438 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304439 }
4440
Somnath Koturde2b1e02016-06-06 07:22:10 -04004441 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4442 be_calculate_pf_pool_rss_tables(adapter);
4443 dev_info(&adapter->pdev->dev,
4444 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4445 be_max_pf_pool_rss_tables(adapter));
4446 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304447 return 0;
4448}
4449
Vasundhara Volamace40af2015-03-04 00:44:34 -05004450static void be_alloc_sriov_res(struct be_adapter *adapter)
4451{
4452 int old_vfs = pci_num_vf(adapter->pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004453 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05004454 int status;
4455
4456 be_get_sriov_config(adapter);
4457
4458 if (!old_vfs)
4459 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4460
4461 /* When the HW is in SRIOV capable configuration, the PF-pool
4462 * resources are given to PF during driver load, if there are no
4463 * old VFs. This facility is not available in BE3 FW.
4464 * Also, this is done by FW in Lancer chip.
4465 */
4466 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004467 be_calculate_vf_res(adapter, 0, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004468 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004469 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004470 if (status)
4471 dev_err(&adapter->pdev->dev,
4472 "Failed to optimize SRIOV resources\n");
4473 }
4474}
4475
Sathya Perla92bf14a2013-08-27 16:57:32 +05304476static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004477{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304478 struct device *dev = &adapter->pdev->dev;
4479 struct be_resources res = {0};
4480 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004481
Sathya Perla92bf14a2013-08-27 16:57:32 +05304482 /* For Lancer, SH etc read per-function resource limits from FW.
4483 * GET_FUNC_CONFIG returns per function guaranteed limits.
4484 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
4485 */
Sathya Perlace7faf02016-06-22 08:54:53 -04004486 if (BEx_chip(adapter)) {
4487 BEx_get_resources(adapter, &res);
4488 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304489 status = be_cmd_get_func_config(adapter, &res);
4490 if (status)
4491 return status;
4492
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004493 /* If a deafault RXQ must be created, we'll use up one RSSQ*/
4494 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4495 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4496 res.max_rss_qs -= 1;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004497 }
4498
Sathya Perlace7faf02016-06-22 08:54:53 -04004499 /* If RoCE is supported stash away half the EQs for RoCE */
4500 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4501 res.max_evt_qs / 2 : res.max_evt_qs;
4502 adapter->res = res;
4503
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004504 /* If FW supports RSS default queue, then skip creating non-RSS
4505 * queue for non-IP traffic.
4506 */
4507 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4508 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4509
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304510 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4511 be_max_txqs(adapter), be_max_rxqs(adapter),
Sathya Perlace7faf02016-06-22 08:54:53 -04004512 be_max_rss(adapter), be_max_nic_eqs(adapter),
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304513 be_max_vfs(adapter));
4514 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4515 be_max_uc(adapter), be_max_mc(adapter),
4516 be_max_vlans(adapter));
4517
Sathya Perlae2617682016-06-22 08:54:54 -04004518 /* Ensure RX and TX queues are created in pairs at init time */
4519 adapter->cfg_num_rx_irqs =
4520 min_t(u16, netif_get_num_default_rss_queues(),
4521 be_max_qp_irqs(adapter));
4522 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304523 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004524}
4525
Sathya Perla39f1d942012-05-08 19:41:24 +00004526static int be_get_config(struct be_adapter *adapter)
4527{
Sathya Perla6b085ba2015-02-23 04:20:09 -05004528 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05304529 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05004530
Suresh Reddy980df242015-12-30 01:29:03 -05004531 status = be_cmd_get_cntl_attributes(adapter);
4532 if (status)
4533 return status;
4534
Kalesh APe97e3cd2014-07-17 16:20:26 +05304535 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004536 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304537 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004538
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05004539 if (!lancer_chip(adapter) && be_physfn(adapter))
4540 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4541
Sathya Perla6b085ba2015-02-23 04:20:09 -05004542 if (BEx_chip(adapter)) {
4543 level = be_cmd_get_fw_log_level(adapter);
4544 adapter->msg_enable =
4545 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4546 }
4547
4548 be_cmd_get_acpi_wol_cap(adapter);
Sriharsha Basavapatna45f13df2016-06-06 07:22:09 -04004549 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4550 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
Sathya Perla6b085ba2015-02-23 04:20:09 -05004551
Vasundhara Volam21252372015-02-06 08:18:42 -05004552 be_cmd_query_port_name(adapter);
4553
4554 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05304555 status = be_cmd_get_active_profile(adapter, &profile_id);
4556 if (!status)
4557 dev_info(&adapter->pdev->dev,
4558 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05304559 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304560
Sathya Perla92bf14a2013-08-27 16:57:32 +05304561 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00004562}
4563
Sathya Perla95046b92013-07-23 15:25:02 +05304564static int be_mac_setup(struct be_adapter *adapter)
4565{
4566 u8 mac[ETH_ALEN];
4567 int status;
4568
4569 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4570 status = be_cmd_get_perm_mac(adapter, mac);
4571 if (status)
4572 return status;
4573
4574 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4575 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304576 }
4577
Sathya Perla95046b92013-07-23 15:25:02 +05304578 return 0;
4579}
4580
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304581static void be_schedule_worker(struct be_adapter *adapter)
4582{
Sathya Perlab7172412016-07-27 05:26:18 -04004583 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304584 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4585}
4586
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304587static void be_destroy_err_recovery_workq(void)
4588{
4589 if (!be_err_recovery_workq)
4590 return;
4591
4592 flush_workqueue(be_err_recovery_workq);
4593 destroy_workqueue(be_err_recovery_workq);
4594 be_err_recovery_workq = NULL;
4595}
4596
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304597static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004598{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304599 struct be_error_recovery *err_rec = &adapter->error_recovery;
4600
4601 if (!be_err_recovery_workq)
4602 return;
4603
4604 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4605 msecs_to_jiffies(delay));
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004606 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4607}
4608
Sathya Perla77071332013-08-27 16:57:34 +05304609static int be_setup_queues(struct be_adapter *adapter)
4610{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304611 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304612 int status;
4613
4614 status = be_evt_queues_create(adapter);
4615 if (status)
4616 goto err;
4617
4618 status = be_tx_qs_create(adapter);
4619 if (status)
4620 goto err;
4621
4622 status = be_rx_cqs_create(adapter);
4623 if (status)
4624 goto err;
4625
4626 status = be_mcc_queues_create(adapter);
4627 if (status)
4628 goto err;
4629
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304630 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4631 if (status)
4632 goto err;
4633
4634 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4635 if (status)
4636 goto err;
4637
Sathya Perla77071332013-08-27 16:57:34 +05304638 return 0;
4639err:
4640 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4641 return status;
4642}
4643
Ajit Khaparde62219062016-02-10 22:45:53 +05304644static int be_if_create(struct be_adapter *adapter)
4645{
4646 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4647 u32 cap_flags = be_if_cap_flags(adapter);
4648 int status;
4649
Sathya Perlab7172412016-07-27 05:26:18 -04004650 /* alloc required memory for other filtering fields */
4651 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4652 sizeof(*adapter->pmac_id), GFP_KERNEL);
4653 if (!adapter->pmac_id)
4654 return -ENOMEM;
4655
4656 adapter->mc_list = kcalloc(be_max_mc(adapter),
4657 sizeof(*adapter->mc_list), GFP_KERNEL);
4658 if (!adapter->mc_list)
4659 return -ENOMEM;
4660
4661 adapter->uc_list = kcalloc(be_max_uc(adapter),
4662 sizeof(*adapter->uc_list), GFP_KERNEL);
4663 if (!adapter->uc_list)
4664 return -ENOMEM;
4665
Sathya Perlae2617682016-06-22 08:54:54 -04004666 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304667 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4668
4669 en_flags &= cap_flags;
4670 /* will enable all the needed filter flags in be_open() */
4671 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4672 &adapter->if_handle, 0);
4673
Sathya Perlab7172412016-07-27 05:26:18 -04004674 if (status)
4675 return status;
4676
4677 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304678}
4679
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304680int be_update_queues(struct be_adapter *adapter)
4681{
4682 struct net_device *netdev = adapter->netdev;
4683 int status;
4684
4685 if (netif_running(netdev))
4686 be_close(netdev);
4687
4688 be_cancel_worker(adapter);
4689
4690 /* If any vectors have been shared with RoCE we cannot re-program
4691 * the MSIx table.
4692 */
4693 if (!adapter->num_msix_roce_vec)
4694 be_msix_disable(adapter);
4695
4696 be_clear_queues(adapter);
Ajit Khaparde62219062016-02-10 22:45:53 +05304697 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4698 if (status)
4699 return status;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304700
4701 if (!msix_enabled(adapter)) {
4702 status = be_msix_enable(adapter);
4703 if (status)
4704 return status;
4705 }
4706
Ajit Khaparde62219062016-02-10 22:45:53 +05304707 status = be_if_create(adapter);
4708 if (status)
4709 return status;
4710
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304711 status = be_setup_queues(adapter);
4712 if (status)
4713 return status;
4714
4715 be_schedule_worker(adapter);
4716
4717 if (netif_running(netdev))
4718 status = be_open(netdev);
4719
4720 return status;
4721}
4722
/* Parse the leading major-version number out of a FW version string
 * such as "11.4.335.0".  Returns 0 when no integer can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	return sscanf(fw_ver, "%d.", &major) == 1 ? major : 0;
}
4733
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304734/* If it is error recovery, FLR the PF
4735 * Else if any VFs are already enabled don't FLR the PF
4736 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004737static bool be_reset_required(struct be_adapter *adapter)
4738{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304739 if (be_error_recovering(adapter))
4740 return true;
4741 else
4742 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004743}
4744
/* Wait for the FW to be ready and perform the required initialization:
 * clear driver error state, FLR the function when needed, issue
 * FW_INIT and finally enable interrupts for other ULPs (e.g. RoCE)
 * sharing this NIC function. Returns 0 or the failing step's status.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4776
/* Full bring-up of the function: FW init, FW config/resource query,
 * MSI-x enable, interface and queue creation, MAC and flow-control
 * programming, optional VF setup, and worker start. Any failure after
 * be_get_config() tears everything down through be_clear().
 * Returns 0 on success or the failing step's status.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	/* Carve out SR-IOV resources before querying our own share */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 with FW < 4.0 has known IRQ problems; warn but continue */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If the requested flow-control setting fails, fall back to
	 * whatever the FW currently reports.
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4881
Ivan Vecera66268732011-12-08 01:31:21 +00004882#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with interrupts unavailable, ring the EQ doorbell
 * (re-arming the EQ) and schedule NAPI for every event queue so that
 * pending completions get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
4894#endif
4895
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004896int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4897{
4898 const struct firmware *fw;
4899 int status;
4900
4901 if (!netif_running(adapter->netdev)) {
4902 dev_err(&adapter->pdev->dev,
4903 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304904 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004905 }
4906
4907 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4908 if (status)
4909 goto fw_exit;
4910
4911 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4912
4913 if (lancer_chip(adapter))
4914 status = lancer_fw_download(adapter, fw);
4915 else
4916 status = be_fw_download(adapter, fw);
4917
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004918 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304919 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004920
Ajit Khaparde84517482009-09-04 03:12:16 +00004921fw_exit:
4922 release_firmware(fw);
4923 return status;
4924}
4925
/* ndo_bridge_setlink handler: program the e-switch port forwarding
 * mode (VEB or VEPA) from the IFLA_BRIDGE_MODE netlink attribute.
 * Only supported with SR-IOV enabled; VEPA is rejected on BE3.
 * NOTE(review): if the nested attributes contain no IFLA_BRIDGE_MODE,
 * the loop falls through to the err label and logs a failure message
 * (with mode still 0, printed as "VEB") while returning 0 — existing
 * upstream behavior, left unchanged.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		/* BE3 e-switch does not support VEPA forwarding */
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4975
/* ndo_bridge_getlink handler: report the current e-switch forwarding
 * mode (VEB/VEPA) via the default netlink dump helper. Returns 0
 * (reporting nothing) when the mode is unavailable, PASSTHRU, or the
 * profile has no SR-IOV VFs on BEx/Lancer.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: query the FW for the actual configured mode */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
5006
Sathya Perlab7172412016-07-27 05:26:18 -04005007static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
5008 void (*func)(struct work_struct *))
5009{
5010 struct be_cmd_work *work;
5011
5012 work = kzalloc(sizeof(*work), GFP_ATOMIC);
5013 if (!work) {
5014 dev_err(&adapter->pdev->dev,
5015 "be_work memory allocation failed\n");
5016 return NULL;
5017 }
5018
5019 INIT_WORK(&work->work, func);
5020 work->adapter = adapter;
5021 return work;
5022}
5023
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005024/* VxLAN offload Notes:
5025 *
5026 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5027 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5028 * is expected to work across all types of IP tunnels once exported. Skyhawk
5029 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305030 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5031 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5032 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005033 *
5034 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5035 * adds more than one port, disable offloads and don't re-enable them again
5036 * until after all the tunnels are removed.
5037 */
/* Workqueue handler for a VxLAN port-add notification. Only a single
 * UDP dport is supported: a repeat add of the current port bumps the
 * alias count; a different port while offloads are active disables
 * offloads entirely (re-enabled only after all ports are removed —
 * see the VxLAN offload notes above be_work_add_vxlan_port's caller).
 * Frees the work item on exit.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	int status;

	/* Same port added again: just track the alias */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		goto done;
	}

	/* A second, different port: HW can offload only one dport */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		goto done;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities to the stack */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	goto done;
err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}
5093
/* Workqueue handler for a VxLAN port-delete notification. Drops an
 * alias reference first if any exist (note: the alias path skips the
 * vxlan_port_count decrement via the "out" label); offloads are only
 * disabled when the last alias of the offloaded port goes away.
 * Frees the work item on exit.
 */
static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}
5119
5120static void be_cfg_vxlan_port(struct net_device *netdev,
5121 struct udp_tunnel_info *ti,
5122 void (*func)(struct work_struct *))
5123{
5124 struct be_adapter *adapter = netdev_priv(netdev);
5125 struct be_cmd_work *cmd_work;
5126
5127 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5128 return;
5129
5130 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5131 return;
5132
5133 cmd_work = be_alloc_work(adapter, func);
5134 if (cmd_work) {
5135 cmd_work->info.vxlan_port = ti->port;
5136 queue_work(be_wq, &cmd_work->work);
5137 }
5138}
5139
/* ndo_udp_tunnel_del hook: defer VxLAN port removal to be_wq */
static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}
5145
/* ndo_udp_tunnel_add hook: defer VxLAN port addition to be_wq */
static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
Joe Stringer725d5482014-11-13 16:38:13 -08005151
/* ndo_features_check hook: per-skb feature masking. Strips checksum/
 * GSO offloads from encapsulated packets that are not VxLAN on the
 * configured port, since Skyhawk's tunnel offloads are programmed for
 * VxLAN on one UDP dport only.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled and
	 * Q-in-Q packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	features = vlan_features_check(skb, features);
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* A genuine VxLAN packet: UDP outer transport, an Ethernet inner
	 * frame directly after a vxlan header, destined to our port.
	 * Anything else loses checksum/GSO offloads for this skb.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac9c47142014-03-27 10:46:19 +05305196
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305197static int be_get_phys_port_id(struct net_device *dev,
5198 struct netdev_phys_item_id *ppid)
5199{
5200 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5201 struct be_adapter *adapter = netdev_priv(dev);
5202 u8 *id;
5203
5204 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5205 return -ENOSPC;
5206
5207 ppid->id[0] = adapter->hba_port_num + 1;
5208 id = &ppid->id[1];
5209 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5210 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5211 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5212
5213 ppid->id_len = id_len;
5214
5215 return 0;
5216}
5217
Sathya Perlab7172412016-07-27 05:26:18 -04005218static void be_set_rx_mode(struct net_device *dev)
5219{
5220 struct be_adapter *adapter = netdev_priv(dev);
5221 struct be_cmd_work *work;
5222
5223 work = be_alloc_work(adapter, be_work_set_rx_mode);
5224 if (work)
5225 queue_work(be_wq, &work->work);
5226}
5227
/* netdev operations table for be2net; entries inside #ifdef blocks are
 * compiled in only when the corresponding kernel feature is enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
	.ndo_udp_tunnel_add = be_add_vxlan_port,
	.ndo_udp_tunnel_del = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
5258
/* One-time netdev initialization: advertise offload features (RXHASH
 * only when the interface supports RSS), cap the GSO size, and hook up
 * the netdev/ethtool operations tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5285
/* Quiesce the adapter for recovery/suspend: detach and close the
 * netdev under rtnl (so concurrent netdev ops see it as absent), then
 * free all adapter resources via be_clear(). Counterpart of
 * be_resume().
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5298
/* Re-initialize the adapter after be_cleanup(): run the full be_setup()
 * sequence, re-open the netdev (under rtnl) if it was running, and
 * re-attach it. Returns 0 or the failing step's status.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
5320
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305321static void be_soft_reset(struct be_adapter *adapter)
5322{
5323 u32 val;
5324
5325 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5326 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5327 val |= SLIPORT_SOFTRESET_SR_MASK;
5328 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5329}
5330
/* Decide whether the currently reported HW error qualifies for a soft
 * reset: the POST stage must flag a recoverable error with a non-zero
 * error code, enough time must have passed since driver load and since
 * the last recovery attempt, and the error must not repeat the previous
 * one. On success, records this attempt's time and error code.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	/* Too soon after probe: do not attempt recovery */
	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	/* Rate-limit: one recovery attempt per recovery_interval */
	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	/* The same error twice in a row is treated as persistent */
	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5376
/* Advance the TPE (recoverable HW error) recovery state machine one
 * step. Returns 0 once the pre-reinit steps are complete, -EAGAIN when
 * the caller (be_err_detection_task) must reschedule after
 * err_rec->resched_delay, or a negative error when recovery is not
 * possible.
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		/* First pass: arm detection and wait the UE detect window */
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* Non-PF0 functions just wait for PF0's reset to complete */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;		/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5447
/* Attempt full adapter recovery after a detected HW error. On BEx/
 * Skyhawk this first drives the TPE recovery state machine (which may
 * return -EAGAIN to request a reschedule); Lancer goes straight to the
 * FW-ready wait. On success the adapter is torn down (be_cleanup) and
 * rebuilt (be_resume). Returns 0, -EAGAIN, or a failure status.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}
5481
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005482static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005483{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305484 struct be_error_recovery *err_rec =
5485 container_of(work, struct be_error_recovery,
5486 err_detection_work.work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005487 struct be_adapter *adapter =
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305488 container_of(err_rec, struct be_adapter,
5489 error_recovery);
5490 u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305491 struct device *dev = &adapter->pdev->dev;
5492 int recovery_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005493
5494 be_detect_error(adapter);
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305495 if (!be_check_error(adapter, BE_ERROR_HW))
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305496 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05005497
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305498 recovery_status = be_err_recover(adapter);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305499 if (!recovery_status) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305500 err_rec->recovery_retries = 0;
5501 err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305502 dev_info(dev, "Adapter recovery successful\n");
5503 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305504 } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5505 /* BEx/SH recovery state machine */
5506 if (adapter->pf_num == 0 &&
5507 err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5508 dev_err(&adapter->pdev->dev,
5509 "Adapter recovery in progress\n");
5510 resched_delay = err_rec->resched_delay;
5511 goto reschedule_task;
5512 } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305513 /* For VFs, check if PF have allocated resources
5514 * every second.
5515 */
5516 dev_err(dev, "Re-trying adapter recovery\n");
5517 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305518 } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5519 ERR_RECOVERY_MAX_RETRY_COUNT) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305520 /* In case of another error during recovery, it takes 30 sec
5521 * for adapter to come out of error. Retry error recovery after
5522 * this time interval.
5523 */
5524 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305525 resched_delay = ERR_RECOVERY_RETRY_DELAY;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305526 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305527 } else {
5528 dev_err(dev, "Adapter recovery failed\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305529 dev_err(dev, "Please reboot server to recover\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005530 }
5531
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305532 return;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305533
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305534reschedule_task:
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305535 be_schedule_err_detection(adapter, resched_delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005536}
5537
Vasundhara Volam21252372015-02-06 08:18:42 -05005538static void be_log_sfp_info(struct be_adapter *adapter)
5539{
5540 int status;
5541
5542 status = be_cmd_query_sfp_info(adapter);
5543 if (!status) {
5544 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305545 "Port %c: %s Vendor: %s part no: %s",
5546 adapter->port_name,
5547 be_misconfig_evt_port_state[adapter->phy_state],
5548 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005549 adapter->phy.vendor_pn);
5550 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305551 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005552}
5553
/* Periodic (1 second) housekeeping work item: samples the die temperature,
 * reaps MCC completions while the interface is down, kicks off stats
 * collection, replenishes starved RX queues, updates EQ delays and logs SFP
 * misconfiguration events.  Always re-arms itself on be_wq.
 */
static void be_worker(struct be_adapter *adapter)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* be_get_temp_freq is a power of 2, so MODULO is well-defined;
	 * temperature is queried on the PF only (see be_probe hwmon setup)
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a fresh stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5602
Sathya Perla78fad34e2015-02-23 04:20:08 -05005603static void be_unmap_pci_bars(struct be_adapter *adapter)
5604{
5605 if (adapter->csr)
5606 pci_iounmap(adapter->pdev, adapter->csr);
5607 if (adapter->db)
5608 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005609 if (adapter->pcicfg && adapter->pcicfg_mapped)
5610 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005611}
5612
/* PCI BAR number holding the doorbell registers: BAR 0 on Lancer chips and
 * on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5620
5621static int be_roce_map_pci_bars(struct be_adapter *adapter)
5622{
5623 if (skyhawk_chip(adapter)) {
5624 adapter->roce_db.size = 4096;
5625 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5626 db_bar(adapter));
5627 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5628 db_bar(adapter));
5629 }
5630 return 0;
5631}
5632
/* Map the PCI BARs needed by the driver:
 *  - CSR (BAR 2) on BEx PFs only
 *  - doorbell BAR (selected by db_bar())
 *  - PCICFG: a separate BAR on Skyhawk/BEx PFs, or an offset into the db
 *    mapping on VFs (pcicfg_mapped records which, for be_unmap_pci_bars())
 * Also latches the SLI family and VF-ness from the SLI_INTF register.
 * Returns 0 on success, -ENOMEM on any mapping failure (all prior mappings
 * are released via be_unmap_pci_bars()).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5677
5678static void be_drv_cleanup(struct be_adapter *adapter)
5679{
5680 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5681 struct device *dev = &adapter->pdev->dev;
5682
5683 if (mem->va)
5684 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5685
5686 mem = &adapter->rx_filter;
5687 if (mem->va)
5688 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5689
5690 mem = &adapter->stats_cmd;
5691 if (mem->va)
5692 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5693}
5694
/* Allocate and initialize various fields in be_adapter struct:
 * DMA-coherent command buffers (mailbox, rx-filter, stats), locks,
 * delayed-work items and default flow-control/temperature settings.
 * Returns 0 on success or -ENOMEM; on failure, already-allocated buffers
 * are freed via the goto unwind chain.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed (see be_drv_cleanup)
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* stats request size depends on the chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5769
/* PCI remove callback: tear everything down in the reverse order of
 * be_probe() — RoCE first, then interrupts/workers, netdev, HW resources,
 * PCI state and finally the netdev allocation itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* don't reset the function while its VFs are assigned to guests */
	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5802
Arnd Bergmann9a032592015-05-18 23:06:45 +02005803static ssize_t be_hwmon_show_temp(struct device *dev,
5804 struct device_attribute *dev_attr,
5805 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305806{
5807 struct be_adapter *adapter = dev_get_drvdata(dev);
5808
5809 /* Unit: millidegree Celsius */
5810 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5811 return -EIO;
5812 else
5813 return sprintf(buf, "%u\n",
5814 adapter->hwmon_info.be_on_die_temp * 1000);
5815}
5816
/* hwmon: expose the die temperature as the standard "temp1_input" sensor */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5826
Sathya Perlad3791422012-09-28 04:39:44 +00005827static char *mc_name(struct be_adapter *adapter)
5828{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305829 char *str = ""; /* default */
5830
5831 switch (adapter->mc_type) {
5832 case UMC:
5833 str = "UMC";
5834 break;
5835 case FLEX10:
5836 str = "FLEX10";
5837 break;
5838 case vNIC1:
5839 str = "vNIC-1";
5840 break;
5841 case nPAR:
5842 str = "nPAR";
5843 break;
5844 case UFP:
5845 str = "UFP";
5846 break;
5847 case vNIC2:
5848 str = "vNIC-2";
5849 break;
5850 default:
5851 str = "";
5852 }
5853
5854 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005855}
5856
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5861
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005862static inline char *nic_name(struct pci_dev *pdev)
5863{
5864 switch (pdev->device) {
5865 case OC_DEVICE_ID1:
5866 return OC_NAME;
5867 case OC_DEVICE_ID2:
5868 return OC_NAME_BE;
5869 case OC_DEVICE_ID3:
5870 case OC_DEVICE_ID4:
5871 return OC_NAME_LANCER;
5872 case BE_DEVICE_ID2:
5873 return BE3_NAME;
5874 case OC_DEVICE_ID5:
5875 case OC_DEVICE_ID6:
5876 return OC_NAME_SH;
5877 default:
5878 return BE_NAME;
5879 }
5880}
5881
/* PCI probe callback: enable the device, allocate the netdev, set the DMA
 * mask (64-bit with 32-bit fallback), map BARs, initialize driver state and
 * HW resources, register the netdev and start the error-detection poller.
 * The error labels unwind in strict reverse order of setup.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to a 32-bit mask if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it can't be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5978
/* Legacy PM suspend: quiesce the adapter (disable interrupts, stop the
 * error-detection poller, tear down queues via be_cleanup), then save PCI
 * state and enter the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5993
/* Legacy PM resume: re-enable the PCI device, restore config space,
 * re-create HW resources (be_resume) and restart the error-detection
 * poller.  Returns 0 on success or a negative error code.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
6013
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* adapter is NULL if be_probe() failed before pci_set_drvdata() */
	if (!adapter)
		return;

	/* stop RoCE and all driver work items before detaching the netdev */
	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* FLR-equivalent: ensure the HW stops all DMA before power-off */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
6034
/* EEH/AER error_detected callback: quiesce the adapter on the first
 * notification (guarded by BE_ERROR_EEH so repeated callbacks are no-ops)
 * and tell the PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
6068
/* EEH/AER slot_reset callback: re-enable the device after the slot reset,
 * restore config space and wait for the FW to come back before declaring
 * the link recovered.  Clears all recorded error state on success.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6094
/* EEH/AER resume callback: re-create HW resources after a successful slot
 * reset, re-attach RoCE and restart the error-detection poller.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6115
/* sysfs sriov_numvfs handler: enable num_vfs VFs (or disable all when
 * num_vfs == 0), redistributing PF-pool resources on Skyhawk and rebuilding
 * the queue configuration.  Returns the number of VFs enabled on success,
 * 0 or a negative error code otherwise.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* VFs currently assigned to guests cannot be torn down safely */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6170
/* PCI error-recovery (EEH/AER) callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6176
/* PCI driver descriptor tying together probe/remove, legacy PM, shutdown,
 * SR-IOV configuration and EEH error handling.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6188
6189static int __init be_init_module(void)
6190{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306191 int status;
6192
Joe Perches8e95a202009-12-03 07:58:21 +00006193 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6194 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006195 printk(KERN_WARNING DRV_NAME
6196 " : Module param rx_frag_size must be 2048/4096/8192."
6197 " Using 2048\n");
6198 rx_frag_size = 2048;
6199 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006200
Vasundhara Volamace40af2015-03-04 00:44:34 -05006201 if (num_vfs > 0) {
6202 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6203 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6204 }
6205
Sathya Perlab7172412016-07-27 05:26:18 -04006206 be_wq = create_singlethread_workqueue("be_wq");
6207 if (!be_wq) {
6208 pr_warn(DRV_NAME "workqueue creation failed\n");
6209 return -1;
6210 }
6211
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306212 be_err_recovery_workq =
6213 create_singlethread_workqueue("be_err_recover");
6214 if (!be_err_recovery_workq)
6215 pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6216
6217 status = pci_register_driver(&be_driver);
6218 if (status) {
6219 destroy_workqueue(be_wq);
6220 be_destroy_err_recovery_workq();
6221 }
6222 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006223}
6224module_init(be_init_module);
6225
/* Module exit: unregister the PCI driver first (so no adapter can queue new
 * work), then destroy the driver-wide workqueues.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);