blob: 95d2fa3de0351e7cbc803c53e35817aece431f7c [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);	/* read-only after load */
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the adapter */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
struct workqueue_struct *be_err_recovery_workq;
48
/* PCI IDs claimed by this driver: BE2/BE3 (ServerEngines/Emulex) and
 * Lancer/Skyhawk (OneConnect) adapters.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for defering cmd calls to the adapter */
struct workqueue_struct *be_wq;
64
/* UE Status Low CSR: one description string per bit position, used when
 * decoding unrecoverable-error (UE) status for log messages.
 * NOTE(review): trailing spaces in some entries are preserved as-is since
 * they are emitted verbatim in log output.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +0530100
/* UE Status High CSR: per-bit description strings for the upper UE status
 * register; indexed by bit position when reporting unrecoverable errors.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
/* Interface capability flags enabled when creating a VF's if_handle */
#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
141
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530145
Sathya Perla1cfafab2012-02-23 18:50:15 +0000146 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
148 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000149 mem->va = NULL;
150 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151}
152
153static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530154 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
156 struct be_dma_mem *mem = &q->dma_mem;
157
158 memset(q, 0, sizeof(*q));
159 q->len = len;
160 q->entry_size = entry_size;
161 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700162 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
163 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000165 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return 0;
167}
168
Somnath Kotur68c45a22013-03-14 02:42:07 +0000169static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170{
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530174 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000175 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
176
Sathya Perla5f0b8492009-07-27 22:52:56 +0000177 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000179 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700180 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000181 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000183
Sathya Perladb3ea782011-08-22 19:41:52 +0000184 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530185 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188static void be_intr_set(struct be_adapter *adapter, bool enable)
189{
190 int status = 0;
191
192 /* On lancer interrupts can't be controlled via this register */
193 if (lancer_chip(adapter))
194 return;
195
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530196 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000197 return;
198
199 status = be_cmd_intr_set(adapter, enable);
200 if (status)
201 be_reg_intr_set(adapter, enable);
202}
203
Sathya Perla8788fdc2009-07-27 22:52:03 +0000204static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205{
206 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530207
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530208 if (be_check_error(adapter, BE_ERROR_HW))
209 return;
210
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211 val |= qid & DB_RQ_RING_ID_MASK;
212 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000213
214 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000215 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216}
217
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000218static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
219 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700220{
221 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530222
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530223 if (be_check_error(adapter, BE_ERROR_HW))
224 return;
225
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000226 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000228
229 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000230 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
Sathya Perla8788fdc2009-07-27 22:52:03 +0000233static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400234 bool arm, bool clear_int, u16 num_popped,
235 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236{
237 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530238
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530240 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000241
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530242 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000243 return;
244
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245 if (arm)
246 val |= 1 << DB_EQ_REARM_SHIFT;
247 if (clear_int)
248 val |= 1 << DB_EQ_CLR_SHIFT;
249 val |= 1 << DB_EQ_EVNT_SHIFT;
250 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400251 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000252 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700253}
254
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256{
257 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530258
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700259 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000260 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
261 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000262
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530263 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000264 return;
265
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700266 if (arm)
267 val |= 1 << DB_CQ_REARM_SHIFT;
268 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000269 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700270}
271
/* ndo_set_mac_address handler: program a new MAC into the adapter.
 * If the interface is down, only netdev->dev_addr is updated; the MAC is
 * programmed into HW later on open. Returns 0 on success, -EADDRNOTAVAIL
 * for an invalid address, or -EPERM/cmd status on FW-level failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
336
Sathya Perlaca34fe32012-11-06 17:48:56 +0000337/* BE2 supports only v0 cmd */
338static void *hw_stats_from_cmd(struct be_adapter *adapter)
339{
340 if (BE2_chip(adapter)) {
341 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
342
343 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500344 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000345 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
346
347 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else {
349 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
350
351 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000352 }
353}
354
355/* BE2 supports only v0 cmd */
356static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
357{
358 if (BE2_chip(adapter)) {
359 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
360
361 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500362 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000363 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
364
365 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500366 } else {
367 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
368
369 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000370 }
371}
372
/* Copy the v0 (BE2) GET_STATS response into the driver's stats struct,
 * after byte-swapping the FW's little-endian buffer in place.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; the
	 * driver exposes them as one combined counter.
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber counters per-port at the rxf level */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
421
/* Copy the v1 (BE3) GET_STATS response into the driver's stats struct,
 * after byte-swapping the FW's little-endian buffer in place.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
467
/* Copy the v2 (Skyhawk-era) GET_STATS response into the driver's stats
 * struct, after byte-swapping the FW's little-endian buffer in place.
 * v2 additionally carries RoCE counters when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
521
/* Copy the Lancer per-port (pport) stats response into the driver's
 * stats struct, after byte-swapping the FW's little-endian buffer.
 * Lancer uses a different stats layout from the BEx/Skyhawk families.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	/* NOTE(review): both fifo-overflow driver counters are fed from the
	 * same rx_fifo_overflow source on Lancer.
	 */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000558
/* Fold a free-running 16-bit HW counter sample @val into a 32-bit
 * accumulator @*acc: the low 16 bits track the last sample, the high bits
 * count wraps. If the new sample is smaller than the previous one the
 * counter wrapped, so add one full 16-bit period.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	/* single store so concurrent readers never see a torn value */
	ACCESS_ONCE(*acc) = newacc;
}
570
Jingoo Han4188e7d2013-08-05 18:02:02 +0900571static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530572 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000573{
574 if (!BEx_chip(adapter))
575 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
576 else
577 /* below erx HW counter can actually wrap around after
578 * 65535. Driver accumulates a 32-bit value
579 */
580 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
581 (u16)erx_stat);
582}
583
/* Parse the most recent FW stats response into adapter->drv_stats,
 * dispatching on chip family to the matching layout parser, and update
 * the per-RX-queue ERX drop counters for non-Lancer chips.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
609
/* ndo_get_stats64 handler: aggregates the per-RX/TX-queue software counters
 * (read consistently via their u64_stats seqcounts) and the per-function
 * error counters from adapter->drv_stats into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until a consistent pkts/bytes pair is read, in case
		 * the datapath updates them concurrently
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
677
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000678void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680 struct net_device *netdev = adapter->netdev;
681
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000682 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000683 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000684 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000686
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530687 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000688 netif_carrier_on(netdev);
689 else
690 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200691
692 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693}
694
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500695static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700696{
Sathya Perla3c8def92011-06-12 20:01:58 +0000697 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530698 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000699
Sathya Perlaab1594e2011-07-25 19:10:15 +0000700 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000701 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500702 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530703 stats->tx_pkts += tx_pkts;
704 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
705 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000706 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700707}
708
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500709/* Returns number of WRBs needed for the skb */
710static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700711{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500712 /* +1 for the header wrb */
713 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700714}
715
716static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
717{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500718 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
719 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
720 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
721 wrb->rsvd0 = 0;
722}
723
724/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
725 * to avoid the swap and shift/mask operations in wrb_fill().
726 */
727static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
728{
729 wrb->frag_pa_hi = 0;
730 wrb->frag_pa_lo = 0;
731 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000732 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700733}
734
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000735static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530736 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000737{
738 u8 vlan_prio;
739 u16 vlan_tag;
740
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100741 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000742 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
743 /* If vlan priority provided by OS is NOT in available bmap */
744 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
745 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500746 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000747
748 return vlan_tag;
749}
750
Sathya Perlac9c47142014-03-27 10:46:19 +0530751/* Used only for IP tunnel packets */
752static u16 skb_inner_ip_proto(struct sk_buff *skb)
753{
754 return (inner_ip_hdr(skb)->version == 4) ?
755 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
756}
757
758static u16 skb_ip_proto(struct sk_buff *skb)
759{
760 return (ip_hdr(skb)->version == 4) ?
761 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
762}
763
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530764static inline bool be_is_txq_full(struct be_tx_obj *txo)
765{
766 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
767}
768
769static inline bool be_can_txq_wake(struct be_tx_obj *txo)
770{
771 return atomic_read(&txo->q.used) < txo->q.len / 2;
772}
773
774static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
775{
776 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
777}
778
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530779static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
780 struct sk_buff *skb,
781 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700782{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700784
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000785 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530786 BE_WRB_F_SET(wrb_params->features, LSO, 1);
787 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000788 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530789 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700790 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530791 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530792 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530793 proto = skb_inner_ip_proto(skb);
794 } else {
795 proto = skb_ip_proto(skb);
796 }
797 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530798 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530799 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530800 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700801 }
802
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100803 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530804 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
805 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700806 }
807
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530808 BE_WRB_F_SET(wrb_params->features, CRC, 1);
809}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500810
/* Fill the TX header WRB (the first WRB of every TX request) from the
 * pre-computed @wrb_params feature flags and @skb's length/WRB count.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* LSO (TSO) settings */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* tell the HW how many WRBs/bytes make up this request */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit additionally routes the pkt to the BMC (OS2BMC path) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
847
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000848static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530849 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000850{
851 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500852 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000853
Sathya Perla7101e112010-03-22 20:41:12 +0000854
Sathya Perlaf986afc2015-02-06 08:18:43 -0500855 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
856 (u64)le32_to_cpu(wrb->frag_pa_lo);
857 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000858 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500859 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000860 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500861 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000862 }
863}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530865/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530866static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700867{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530868 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700869
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530870 queue_head_inc(&txo->q);
871 return head;
872}
873
/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	/* @head is the index reserved earlier by be_tx_get_wrb_hdr() */
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW consumes the hdr WRB in little-endian format */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* park the skb at the hdr index; freed on TX completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	/* account for the hdr WRB and all fragment WRBs of this pkt */
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	/* WRBs queued but not yet notified to HW via doorbell */
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700894
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530895/* Setup a WRB fragment (buffer descriptor) for xmit */
896static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
897 int len)
898{
899 struct be_eth_wrb *wrb;
900 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700901
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530902 wrb = queue_head_node(txq);
903 wrb_fill(wrb, busaddr, len);
904 queue_head_inc(txq);
905}
906
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to this pkt's (unmapped) hdr WRB so we
	 * can walk forward over the fragment WRBs that were filled in
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* @copied is the total bytes mapped so far; count it back down to 0
	 * as each fragment WRB is unmapped
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first (linear) frag used dma_map_single() */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index at the hdr WRB so it gets reused */
	txq->head = head;
}
934
935/* Enqueue the given packet for transmit. This routine allocates WRBs for the
936 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
937 * of WRBs used up by the packet.
938 */
939static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
940 struct sk_buff *skb,
941 struct be_wrb_params *wrb_params)
942{
943 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
944 struct device *dev = &adapter->pdev->dev;
945 struct be_queue_info *txq = &txo->q;
946 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530947 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530948 dma_addr_t busaddr;
949 int len;
950
951 head = be_tx_get_wrb_hdr(txo);
952
953 if (skb->len > skb->data_len) {
954 len = skb_headlen(skb);
955
956 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
957 if (dma_mapping_error(dev, busaddr))
958 goto dma_err;
959 map_single = true;
960 be_tx_setup_wrb_frag(txo, busaddr, len);
961 copied += len;
962 }
963
964 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
965 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
966 len = skb_frag_size(frag);
967
968 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
969 if (dma_mapping_error(dev, busaddr))
970 goto dma_err;
971 be_tx_setup_wrb_frag(txo, busaddr, len);
972 copied += len;
973 }
974
975 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
976
977 be_tx_stats_update(txo, skb);
978 return wrb_cnt;
979
980dma_err:
981 adapter->drv_stats.dma_map_errors++;
982 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000983 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700984}
985
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500986static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
987{
988 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
989}
990
Somnath Kotur93040ae2012-06-26 22:32:10 +0000991static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000992 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530993 struct be_wrb_params
994 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000995{
996 u16 vlan_tag = 0;
997
998 skb = skb_share_check(skb, GFP_ATOMIC);
999 if (unlikely(!skb))
1000 return skb;
1001
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001002 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +00001003 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301004
1005 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
1006 if (!vlan_tag)
1007 vlan_tag = adapter->pvid;
1008 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1009 * skip VLAN insertion
1010 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301011 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301012 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001013
1014 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001015 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1016 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001017 if (unlikely(!skb))
1018 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001019 skb->vlan_tci = 0;
1020 }
1021
1022 /* Insert the outer VLAN, if any */
1023 if (adapter->qnq_vid) {
1024 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001025 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1026 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001027 if (unlikely(!skb))
1028 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301029 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001030 }
1031
Somnath Kotur93040ae2012-06-26 22:32:10 +00001032 return skb;
1033}
1034
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001035static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1036{
1037 struct ethhdr *eh = (struct ethhdr *)skb->data;
1038 u16 offset = ETH_HLEN;
1039
1040 if (eh->h_proto == htons(ETH_P_IPV6)) {
1041 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1042
1043 offset += sizeof(struct ipv6hdr);
1044 if (ip6h->nexthdr != NEXTHDR_TCP &&
1045 ip6h->nexthdr != NEXTHDR_UDP) {
1046 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301047 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001048
1049 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1050 if (ehdr->hdrlen == 0xff)
1051 return true;
1052 }
1053 }
1054 return false;
1055}
1056
1057static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1058{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001059 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001060}
1061
Sathya Perla748b5392014-05-09 13:29:13 +05301062static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001063{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001064 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001065}
1066
/* Apply BEx/Lancer HW-errata workarounds to @skb before xmit. May modify,
 * replace or drop the skb; returns the skb to transmit, or NULL if it was
 * dropped/lost (the caller must not touch it then).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the pad bytes so skb len matches the IP tot_len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* drop path: we still own the skb and must free it */
	dev_kfree_skb_any(skb);
err:
	/* err path: the skb was consumed/freed by the callee */
	return NULL;
}
1135
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301136static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1137 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301138 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301139{
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301140 int err;
1141
Suresh Reddy8227e992015-10-12 03:47:19 -04001142 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1143 * packets that are 32b or less may cause a transmit stall
1144 * on that port. The workaround is to pad such packets
1145 * (len <= 32 bytes) to a minimum length of 36b.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301146 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001147 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001148 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301149 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301150 }
1151
1152 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301153 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301154 if (!skb)
1155 return NULL;
1156 }
1157
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301158 /* The stack can send us skbs with length greater than
1159 * what the HW can handle. Trim the extra bytes.
1160 */
1161 WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1162 err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1163 WARN_ON(err);
1164
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301165 return skb;
1166}
1167
/* Ring the TX doorbell for all WRBs queued since the last notify. Also
 * finalizes the last TX request: makes it eventable, and on BEx chips
 * appends a dummy WRB if the pending count is odd (HW requirement).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	/* NOTE: hdr is already in LE format, hence the cpu_to_le32 masks */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* rewrite the last req's num_wrb field to include the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1191
/* OS2BMC related */

/* well-known UDP ports whose traffic the BMC may want mirrored */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

/* pkt-classification predicates: does this frame class qualify for a
 * BMC copy given the current adapter->bmc_filt_mask?
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

/* per-protocol filter-enable tests over adapter->bmc_filt_mask */
#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1245
/* Decide whether a copy of *skb must also be delivered to the BMC
 * (OS2BMC). Only multicast/broadcast pkts of filter-enabled protocol
 * classes (ARP, DHCP, NetBIOS, IPv6 RA/NA/RAS) qualify. When this
 * returns true, *skb may have been replaced: for VLAN traffic the tag
 * is inserted inline, as the ASIC expects it in the pkt sent to the BMC.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* unicast traffic is never mirrored to the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 neighbour-discovery: router/neighbour advertisements */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-relay, keyed on the UDP dest port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1315
/* ndo_start_xmit handler: map the skb into TX WRBs on the queue selected
 * by the skb's queue mapping, optionally enqueue a second copy for the
 * BMC (with the mgmt/OS2BMC bit set), and ring the doorbell when the
 * xmit_more batching window closes or the subqueue fills up.
 * Always returns NETDEV_TX_OK; on failure the skb is dropped, not requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* flush (doorbell) now unless the stack promises more packets */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	/* may linearize/copy or drop the skb; NULL means it was consumed */
	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* skb is now referenced by two sets of WRBs */
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1366
1367static int be_change_mtu(struct net_device *netdev, int new_mtu)
1368{
1369 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301370 struct device *dev = &adapter->pdev->dev;
1371
1372 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1373 dev_info(dev, "MTU must be between %d and %d bytes\n",
1374 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001375 return -EINVAL;
1376 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301377
1378 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301379 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001380 netdev->mtu = new_mtu;
1381 return 0;
1382}
1383
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001384static inline bool be_in_all_promisc(struct be_adapter *adapter)
1385{
1386 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1387 BE_IF_FLAGS_ALL_PROMISCUOUS;
1388}
1389
1390static int be_set_vlan_promisc(struct be_adapter *adapter)
1391{
1392 struct device *dev = &adapter->pdev->dev;
1393 int status;
1394
1395 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1396 return 0;
1397
1398 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1399 if (!status) {
1400 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1401 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1402 } else {
1403 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1404 }
1405 return status;
1406}
1407
1408static int be_clear_vlan_promisc(struct be_adapter *adapter)
1409{
1410 struct device *dev = &adapter->pdev->dev;
1411 int status;
1412
1413 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1414 if (!status) {
1415 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1416 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1417 }
1418 return status;
1419}
1420
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001421/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001422 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1423 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001424 */
Sathya Perla10329df2012-06-05 19:37:18 +00001425static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001426{
Vasundhara Volam50762662014-09-12 17:39:14 +05301427 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001428 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301429 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001430 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001431
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001432 /* No need to change the VLAN state if the I/F is in promiscuous */
1433 if (adapter->netdev->flags & IFF_PROMISC)
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001434 return 0;
1435
Sathya Perla92bf14a2013-08-27 16:57:32 +05301436 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001437 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001438
Somnath Kotur841f60f2016-07-27 05:26:15 -04001439 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1440 status = be_clear_vlan_promisc(adapter);
1441 if (status)
1442 return status;
1443 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001444 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301445 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1446 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001447
Vasundhara Volam435452a2015-03-20 06:28:23 -04001448 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001449 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001450 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001451 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP77be8c12015-05-06 05:30:35 -04001452 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1453 addl_status(status) ==
Kalesh AP4c600052014-05-30 19:06:26 +05301454 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001455 return be_set_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001457 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458}
1459
Patrick McHardy80d5c362013-04-19 02:04:28 +00001460static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461{
1462 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001463 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001464
Sathya Perlab7172412016-07-27 05:26:18 -04001465 mutex_lock(&adapter->rx_filter_lock);
1466
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001467 /* Packets with VID 0 are always received by Lancer by default */
1468 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001469 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301470
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301471 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001472 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001473
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301474 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301475 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001476
Sathya Perlab7172412016-07-27 05:26:18 -04001477 status = be_vid_config(adapter);
1478done:
1479 mutex_unlock(&adapter->rx_filter_lock);
1480 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481}
1482
Patrick McHardy80d5c362013-04-19 02:04:28 +00001483static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484{
1485 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001486 int status = 0;
1487
1488 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001490 /* Packets with VID 0 are always received by Lancer by default */
1491 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001492 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001493
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301494 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001495 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301496
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301497 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301498 adapter->vlans_added--;
1499
Sathya Perlab7172412016-07-27 05:26:18 -04001500 status = be_vid_config(adapter);
1501done:
1502 mutex_unlock(&adapter->rx_filter_lock);
1503 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001504}
1505
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001506static void be_set_all_promisc(struct be_adapter *adapter)
1507{
1508 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1509 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1510}
1511
1512static void be_set_mc_promisc(struct be_adapter *adapter)
1513{
1514 int status;
1515
1516 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1517 return;
1518
1519 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1520 if (!status)
1521 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1522}
1523
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001524static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001525{
1526 int status;
1527
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001528 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1529 return;
1530
1531 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001532 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001533 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1534}
1535
1536static void be_clear_uc_promisc(struct be_adapter *adapter)
1537{
1538 int status;
1539
1540 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1541 return;
1542
1543 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1544 if (!status)
1545 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1546}
1547
1548/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1549 * We use a single callback function for both sync and unsync. We really don't
1550 * add/remove addresses through this callback. But, we use it to detect changes
1551 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1552 */
1553static int be_uc_list_update(struct net_device *netdev,
1554 const unsigned char *addr)
1555{
1556 struct be_adapter *adapter = netdev_priv(netdev);
1557
1558 adapter->update_uc_list = true;
1559 return 0;
1560}
1561
1562static int be_mc_list_update(struct net_device *netdev,
1563 const unsigned char *addr)
1564{
1565 struct be_adapter *adapter = netdev_priv(netdev);
1566
1567 adapter->update_mc_list = true;
1568 return 0;
1569}
1570
/* Program the multicast list into the HW RX filter.
 * The netdev addr lock is held only while snapshotting the mc-list into
 * adapter->mc_list; the FW commands are issued after dropping it.
 * Falls back to mc-promiscuous mode when the list exceeds the HW limit,
 * IFF_ALLMULTI is set, or programming the list fails.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	/* detect changes to the mc-list (callback only sets update_mc_list) */
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		/* interface-promisc covers multicast; nothing to program */
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			/* could not program the list: stay reachable via
			 * mc-promisc instead
			 */
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1621
1622static void be_clear_mc_list(struct be_adapter *adapter)
1623{
1624 struct net_device *netdev = adapter->netdev;
1625
1626 __dev_mc_unsync(netdev, NULL);
1627 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001628 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001629}
1630
1631static void be_set_uc_list(struct be_adapter *adapter)
1632{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001633 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001634 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001635 bool uc_promisc = false;
Sathya Perlab7172412016-07-27 05:26:18 -04001636 int curr_uc_macs = 0, i;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001637
Sathya Perlab7172412016-07-27 05:26:18 -04001638 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001639 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001640
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001641 if (netdev->flags & IFF_PROMISC) {
1642 adapter->update_uc_list = false;
1643 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1644 uc_promisc = true;
1645 adapter->update_uc_list = false;
1646 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1647 /* Update uc-list unconditionally if the iface was previously
1648 * in uc-promisc mode and now is out of that mode.
1649 */
1650 adapter->update_uc_list = true;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001651 }
1652
Sathya Perlab7172412016-07-27 05:26:18 -04001653 if (adapter->update_uc_list) {
1654 i = 1; /* First slot is claimed by the Primary MAC */
1655
1656 /* cache the uc-list in adapter array */
1657 netdev_for_each_uc_addr(ha, netdev) {
1658 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1659 i++;
1660 }
1661 curr_uc_macs = netdev_uc_count(netdev);
1662 }
1663 netif_addr_unlock_bh(netdev);
1664
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001665 if (uc_promisc) {
1666 be_set_uc_promisc(adapter);
1667 } else if (adapter->update_uc_list) {
1668 be_clear_uc_promisc(adapter);
1669
Sathya Perlab7172412016-07-27 05:26:18 -04001670 for (i = 0; i < adapter->uc_macs; i++)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001671 be_cmd_pmac_del(adapter, adapter->if_handle,
Sathya Perlab7172412016-07-27 05:26:18 -04001672 adapter->pmac_id[i + 1], 0);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001673
Sathya Perlab7172412016-07-27 05:26:18 -04001674 for (i = 0; i < curr_uc_macs; i++)
1675 be_cmd_pmac_add(adapter, adapter->uc_list[i].mac,
1676 adapter->if_handle,
1677 &adapter->pmac_id[i + 1], 0);
1678 adapter->uc_macs = curr_uc_macs;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001679 adapter->update_uc_list = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001680 }
1681}
1682
1683static void be_clear_uc_list(struct be_adapter *adapter)
1684{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001685 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001686 int i;
1687
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001688 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001689 for (i = 0; i < adapter->uc_macs; i++)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001690 be_cmd_pmac_del(adapter, adapter->if_handle,
Sathya Perlab7172412016-07-27 05:26:18 -04001691 adapter->pmac_id[i + 1], 0);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001692 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301693}
1694
Sathya Perlab7172412016-07-27 05:26:18 -04001695static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001696{
Sathya Perlab7172412016-07-27 05:26:18 -04001697 struct net_device *netdev = adapter->netdev;
1698
1699 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001700
1701 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001702 if (!be_in_all_promisc(adapter))
1703 be_set_all_promisc(adapter);
1704 } else if (be_in_all_promisc(adapter)) {
1705 /* We need to re-program the vlan-list or clear
1706 * vlan-promisc mode (if needed) when the interface
1707 * comes out of promisc mode.
1708 */
1709 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001710 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001711
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001712 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001713 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001714
1715 mutex_unlock(&adapter->rx_filter_lock);
1716}
1717
1718static void be_work_set_rx_mode(struct work_struct *work)
1719{
1720 struct be_cmd_work *cmd_work =
1721 container_of(work, struct be_cmd_work, work);
1722
1723 __be_set_rx_mode(cmd_work->adapter);
1724 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001725}
1726
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001727static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1728{
1729 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001730 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001731 int status;
1732
Sathya Perla11ac75e2011-12-13 00:58:50 +00001733 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001734 return -EPERM;
1735
Sathya Perla11ac75e2011-12-13 00:58:50 +00001736 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001737 return -EINVAL;
1738
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301739 /* Proceed further only if user provided MAC is different
1740 * from active MAC
1741 */
1742 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1743 return 0;
1744
Sathya Perla3175d8c2013-07-23 15:25:03 +05301745 if (BEx_chip(adapter)) {
1746 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1747 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001748
Sathya Perla11ac75e2011-12-13 00:58:50 +00001749 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1750 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301751 } else {
1752 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1753 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001754 }
1755
Kalesh APabccf232014-07-17 16:20:24 +05301756 if (status) {
1757 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1758 mac, vf, status);
1759 return be_cmd_status(status);
1760 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001761
Kalesh APabccf232014-07-17 16:20:24 +05301762 ether_addr_copy(vf_cfg->mac_addr, mac);
1763
1764 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001765}
1766
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001767static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301768 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001769{
1770 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001771 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001772
Sathya Perla11ac75e2011-12-13 00:58:50 +00001773 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001774 return -EPERM;
1775
Sathya Perla11ac75e2011-12-13 00:58:50 +00001776 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001777 return -EINVAL;
1778
1779 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001780 vi->max_tx_rate = vf_cfg->tx_rate;
1781 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001782 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1783 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001784 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301785 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001786 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001787
1788 return 0;
1789}
1790
Vasundhara Volam435452a2015-03-20 06:28:23 -04001791static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1792{
1793 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1794 u16 vids[BE_NUM_VLANS_SUPPORTED];
1795 int vf_if_id = vf_cfg->if_handle;
1796 int status;
1797
1798 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001799 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001800 if (status)
1801 return status;
1802
1803 /* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1804 vids[0] = 0;
1805 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1806 if (!status)
1807 dev_info(&adapter->pdev->dev,
1808 "Cleared guest VLANs on VF%d", vf);
1809
1810 /* After TVT is enabled, disallow VFs to program VLAN filters */
1811 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1812 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1813 ~BE_PRIV_FILTMGMT, vf + 1);
1814 if (!status)
1815 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1816 }
1817 return 0;
1818}
1819
1820static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1821{
1822 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1823 struct device *dev = &adapter->pdev->dev;
1824 int status;
1825
1826 /* Reset Transparent VLAN Tagging. */
1827 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
Kalesh APe7bcbd72015-05-06 05:30:32 -04001828 vf_cfg->if_handle, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001829 if (status)
1830 return status;
1831
1832 /* Allow VFs to program VLAN filtering */
1833 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1834 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1835 BE_PRIV_FILTMGMT, vf + 1);
1836 if (!status) {
1837 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1838 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1839 }
1840 }
1841
1842 dev_info(dev,
1843 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1844 return 0;
1845}
1846
Sathya Perla748b5392014-05-09 13:29:13 +05301847static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001848{
1849 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001850 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001851 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001852
Sathya Perla11ac75e2011-12-13 00:58:50 +00001853 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001854 return -EPERM;
1855
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001856 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001857 return -EINVAL;
1858
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001859 if (vlan || qos) {
1860 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001861 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001862 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001863 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001864 }
1865
Kalesh APabccf232014-07-17 16:20:24 +05301866 if (status) {
1867 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001868 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1869 status);
Kalesh APabccf232014-07-17 16:20:24 +05301870 return be_cmd_status(status);
1871 }
1872
1873 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301874 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001875}
1876
/* ndo_set_vf_rate handler: configure a max TX rate for VF @vf.
 * min_tx_rate is not supported (must be 0). A max_tx_rate of 0 clears
 * the limit. A non-zero rate is validated against the current link speed
 * (link must be up; rate in [100, link_speed] Mbps; on Skyhawk also a
 * multiple of 1% of link speed) before being programmed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* a minimum rate guarantee is not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* rate 0 == no limit: skip the link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* cache the accepted rate for be_get_vf_config() */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301938
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301939static int be_set_vf_link_state(struct net_device *netdev, int vf,
1940 int link_state)
1941{
1942 struct be_adapter *adapter = netdev_priv(netdev);
1943 int status;
1944
1945 if (!sriov_enabled(adapter))
1946 return -EPERM;
1947
1948 if (vf >= adapter->num_vfs)
1949 return -EINVAL;
1950
1951 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301952 if (status) {
1953 dev_err(&adapter->pdev->dev,
1954 "Link state change on VF %d failed: %#x\n", vf, status);
1955 return be_cmd_status(status);
1956 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301957
Kalesh APabccf232014-07-17 16:20:24 +05301958 adapter->vf_cfg[vf].plink_tracking = link_state;
1959
1960 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301961}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001962
Kalesh APe7bcbd72015-05-06 05:30:32 -04001963static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1964{
1965 struct be_adapter *adapter = netdev_priv(netdev);
1966 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1967 u8 spoofchk;
1968 int status;
1969
1970 if (!sriov_enabled(adapter))
1971 return -EPERM;
1972
1973 if (vf >= adapter->num_vfs)
1974 return -EINVAL;
1975
1976 if (BEx_chip(adapter))
1977 return -EOPNOTSUPP;
1978
1979 if (enable == vf_cfg->spoofchk)
1980 return 0;
1981
1982 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1983
1984 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1985 0, spoofchk);
1986 if (status) {
1987 dev_err(&adapter->pdev->dev,
1988 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1989 return be_cmd_status(status);
1990 }
1991
1992 vf_cfg->spoofchk = enable;
1993 return 0;
1994}
1995
Sathya Perla2632baf2013-10-01 16:00:00 +05301996static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1997 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001998{
Sathya Perla2632baf2013-10-01 16:00:00 +05301999 aic->rx_pkts_prev = rx_pkts;
2000 aic->tx_reqs_prev = tx_pkts;
2001 aic->jiffies = now;
2002}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002003
/* Compute a new event-queue delay (interrupt coalescing value) for @eqo
 * from the rx+tx packet rate observed since the last call.
 * Returns the static et_eqd when adaptive coalescing is disabled, the
 * previous eqd when the sample is unusable (first call, jiffies wrap,
 * counter wrap, or <1ms elapsed), otherwise a value derived from the
 * packets-per-second rate, clamped to [aic->min_eqd, aic->max_eqd].
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* adaptive mode off: reset the baseline, use the fixed eqd */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* sum rx pkts over all RX queues on this EQ (u64_stats retry loop) */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	/* likewise for tx requests over all TX queues on this EQ */
	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* combined rx+tx packets per second since the last sample */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
2064
2065/* For Skyhawk-R only */
2066static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2067{
2068 struct be_adapter *adapter = eqo->adapter;
2069 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2070 ulong now = jiffies;
2071 int eqd;
2072 u32 mult_enc;
2073
2074 if (!aic->enable)
2075 return 0;
2076
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302077 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002078 eqd = aic->prev_eqd;
2079 else
2080 eqd = be_get_new_eqd(eqo);
2081
2082 if (eqd > 100)
2083 mult_enc = R2I_DLY_ENC_1;
2084 else if (eqd > 60)
2085 mult_enc = R2I_DLY_ENC_2;
2086 else if (eqd > 20)
2087 mult_enc = R2I_DLY_ENC_3;
2088 else
2089 mult_enc = R2I_DLY_ENC_0;
2090
2091 aic->prev_eqd = eqd;
2092
2093 return mult_enc;
2094}
2095
2096void be_eqd_update(struct be_adapter *adapter, bool force_update)
2097{
2098 struct be_set_eqd set_eqd[MAX_EVT_QS];
2099 struct be_aic_obj *aic;
2100 struct be_eq_obj *eqo;
2101 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002102
Sathya Perla2632baf2013-10-01 16:00:00 +05302103 for_all_evt_queues(adapter, eqo, i) {
2104 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002105 eqd = be_get_new_eqd(eqo);
2106 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302107 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2108 set_eqd[num].eq_id = eqo->q.id;
2109 aic->prev_eqd = eqd;
2110 num++;
2111 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002112 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302113
2114 if (num)
2115 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002116}
2117
Sathya Perla3abcded2010-10-03 22:12:27 -07002118static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302119 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002120{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002121 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002122
Sathya Perlaab1594e2011-07-25 19:10:15 +00002123 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002124 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002125 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002126 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302127 if (rxcp->tunneled)
2128 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002129 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002130 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002131 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002132 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002133 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002134}
2135
Sathya Perla2e588f82011-03-11 02:49:26 +00002136static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002137{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002138 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302139 * Also ignore ipcksm for ipv6 pkts
2140 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002141 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302142 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002143}
2144
/* Consume the rx buffer fragment at the RXQ tail and return its page-info.
 * Big pages are DMA-mapped once for all their frags: the mapping is torn
 * down only when the frag marked last_frag is consumed; other frags are
 * just synced for CPU access. Caller owns the page reference afterwards.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the big page: unmap the whole page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Intermediate frag: only make this frag CPU-visible */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2170
2171/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002172static void be_rx_compl_discard(struct be_rx_obj *rxo,
2173 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002174{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002175 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002176 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002177
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002178 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302179 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002180 put_page(page_info->page);
2181 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002182 }
2183}
2184
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN-or-less bytes are copied into
 * the linear area; any remaining data is attached as page frags, with
 * frags from the same physical page coalesced into one skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header; the rest of the first frag
		 * stays in the page and becomes skb frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page reference (if any) now belongs to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2259
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted rx frags, set offload metadata
 * (checksum, rss hash, vlan) and hand it to the network stack.
 * On skb allocation failure the frags are discarded and a drop counted.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the device advertises RXCSUM and
	 * the completion flags say it verified a TCP/UDP checksum.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled (vxlan) pkts the inner csum was also verified */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2295
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted rx frags directly to the napi GRO skb (no copy),
 * coalescing frags from the same physical page, then feed it to GRO.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at (u16)-1 but is incremented before first use, since
	 * the i == 0 iteration always takes the "fresh slot" branch.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as slot j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for completions with a good L4 csum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled (vxlan) pkts the inner csum was also verified */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2352
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002353static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2354 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002355{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302356 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2357 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2358 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2359 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2360 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2361 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2362 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2363 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2364 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2365 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2366 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002367 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302368 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2369 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002370 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302371 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302372 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302373 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002374}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002375
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002376static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2377 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002378{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302379 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2380 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2381 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2382 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2383 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2384 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2385 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2386 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2387 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2388 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2389 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002390 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302391 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2392 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002393 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302394 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2395 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002396}
2397
/* Fetch the next valid rx completion (if any) from the RX CQ, parse it
 * into rxo->rxcp and consume the CQ entry. Returns NULL when no valid
 * completion is pending.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 csum result is meaningless for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* BE chips report the vlan tag byte-swapped; Lancer doesn't */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag unless the host explicitly configured
		 * that vlan id
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2442
Eric Dumazet1829b082011-03-01 05:48:12 +00002443static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002444{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002445 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002446
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002447 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002448 gfp |= __GFP_COMP;
2449 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002450}
2451
2452/*
2453 * Allocate a page, split it to fragments of size rx_frag_size and post as
2454 * receive buffers to BE
2455 */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302456static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002457{
Sathya Perla3abcded2010-10-03 22:12:27 -07002458 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08002459 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07002460 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002461 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002462 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002463 struct be_eth_rx_d *rxd;
2464 u64 page_dmaaddr = 0, frag_dmaaddr;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302465 u32 posted, page_offset = 0, notify = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002466
Sathya Perla3abcded2010-10-03 22:12:27 -07002467 page_info = &rxo->page_info_tbl[rxq->head];
Ajit Khapardec30d7262014-09-12 17:39:16 +05302468 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002469 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002470 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002471 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002472 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002473 break;
2474 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01002475 page_dmaaddr = dma_map_page(dev, pagep, 0,
2476 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002477 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01002478 if (dma_mapping_error(dev, page_dmaaddr)) {
2479 put_page(pagep);
2480 pagep = NULL;
Vasundhara Volamd3de1542014-09-02 09:56:50 +05302481 adapter->drv_stats.dma_map_errors++;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002482 break;
2483 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302484 page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002485 } else {
2486 get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05302487 page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002488 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302489 page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002490 page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002491
2492 rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05302493 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002494 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2495 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496
2497 /* Any space left in the current big page for another frag? */
2498 if ((page_offset + rx_frag_size + rx_frag_size) >
2499 adapter->big_page_size) {
2500 pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05302501 page_info->last_frag = true;
2502 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2503 } else {
2504 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002505 }
Sathya Perla26d92f92010-01-21 22:52:08 -08002506
2507 prev_page_info = page_info;
2508 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002509 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002510 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302511
2512 /* Mark the last frag of a page when we break out of the above loop
2513 * with no more slots available in the RXQ
2514 */
2515 if (pagep) {
2516 prev_page_info->last_frag = true;
2517 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2518 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002519
2520 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002521 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302522 if (rxo->rx_post_starved)
2523 rxo->rx_post_starved = false;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302524 do {
Ajit Khaparde69304cc2015-04-08 16:59:48 -05002525 notify = min(MAX_NUM_POST_ERX_DB, posted);
Ajit Khapardec30d7262014-09-12 17:39:16 +05302526 be_rxq_notify(adapter, rxq->id, notify);
2527 posted -= notify;
2528 } while (posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07002529 } else if (atomic_read(&rxq->used) == 0) {
2530 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07002531 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002532 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002533}
2534
/* Fetch the next valid tx completion (if any) from the TX CQ, decode it
 * into txo->txcp and consume the CQ entry. Returns NULL when no valid
 * completion is pending.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry is not parsed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2555
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * every WRB and freeing the skbs that were transmitted. A non-NULL entry
 * in sent_skb_list marks the hdr WRB of a new request. Returns the number
 * of WRBs consumed so the caller can credit the TXQ.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			/* The first frag WRB of a req maps the skb header */
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2590
/* Return the number of events in the event queue; each consumed entry's
 * evt word is cleared so HW/driver can distinguish new events.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* A zero evt word means no further events are pending */
		if (eqe->evt == 0)
			break;

		/* Order the evt load before consuming/clearing the entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2610
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002611/* Leaves the EQ is disarmed state */
2612static void be_eq_clean(struct be_eq_obj *eqo)
2613{
2614 int num = events_get(eqo);
2615
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002616 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002617}
2618
Kalesh AP99b44302015-08-05 03:27:49 -04002619/* Free posted rx buffers that were not used */
2620static void be_rxq_clean(struct be_rx_obj *rxo)
2621{
2622 struct be_queue_info *rxq = &rxo->q;
2623 struct be_rx_page_info *page_info;
2624
2625 while (atomic_read(&rxq->used) > 0) {
2626 page_info = get_rx_page_info(rxo);
2627 put_page(page_info->page);
2628 memset(page_info, 0, sizeof(*page_info));
2629 }
2630 BUG_ON(atomic_read(&rxq->used));
2631 rxq->tail = 0;
2632 rxq->head = 0;
2633}
2634
/* Drain the RX CQ during teardown, discarding every pending completion,
 * and wait (up to ~50ms) for the HW flush completion. Leaves the CQ
 * unarmed on return.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50 x 1ms polls, or immediately if
			 * the adapter is in a HW-error state.
			 */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2674
/* Drain TX completions from all TX queues during teardown, then reclaim any
 * wrbs that were queued but never notified to HW and reset the ring indices.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		/* Exit when all queues are drained, HW has been silent for
		 * 10 iterations (1ms each), or a HW error is flagged.
		 */
		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2741
/* Tear down all event queues: drain and destroy each created EQ in HW,
 * unregister its NAPI context, and free SW resources.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* Queue memory is freed even if the EQ was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2758
2759static int be_evt_queues_create(struct be_adapter *adapter)
2760{
2761 struct be_queue_info *eq;
2762 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302763 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002764 int i, rc;
2765
Sathya Perlae2617682016-06-22 08:54:54 -04002766 /* need enough EQs to service both RX and TX queues */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302767 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
Sathya Perlae2617682016-06-22 08:54:54 -04002768 max(adapter->cfg_num_rx_irqs,
2769 adapter->cfg_num_tx_irqs));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002770
2771 for_all_evt_queues(adapter, eqo, i) {
Rusty Russellf36963c2015-05-09 03:14:13 +09302772 int numa_node = dev_to_node(&adapter->pdev->dev);
Kalesh AP649886a2015-08-05 03:27:50 -04002773
Sathya Perla2632baf2013-10-01 16:00:00 +05302774 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002775 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002776 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302777 aic->max_eqd = BE_MAX_EQD;
2778 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002779
2780 eq = &eqo->q;
2781 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302782 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002783 if (rc)
2784 return rc;
2785
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302786 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002787 if (rc)
2788 return rc;
Kalesh AP649886a2015-08-05 03:27:50 -04002789
2790 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2791 return -ENOMEM;
2792 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2793 eqo->affinity_mask);
2794 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2795 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002796 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002797 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002798}
2799
Sathya Perla5fb379e2009-06-18 00:02:59 +00002800static void be_mcc_queues_destroy(struct be_adapter *adapter)
2801{
2802 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002803
Sathya Perla8788fdc2009-07-27 22:52:03 +00002804 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002805 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002806 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002807 be_queue_free(adapter, q);
2808
Sathya Perla8788fdc2009-07-27 22:52:03 +00002809 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002810 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002811 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002812 be_queue_free(adapter, q);
2813}
2814
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate and create the CQ first; the MCC queue posts into it */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of acquisition */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2847
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002848static void be_tx_queues_destroy(struct be_adapter *adapter)
2849{
2850 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002851 struct be_tx_obj *txo;
2852 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002853
Sathya Perla3c8def92011-06-12 20:01:58 +00002854 for_all_tx_queues(adapter, txo, i) {
2855 q = &txo->q;
2856 if (q->created)
2857 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2858 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002859
Sathya Perla3c8def92011-06-12 20:01:58 +00002860 q = &txo->cq;
2861 if (q->created)
2862 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2863 be_queue_free(adapter, q);
2864 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002865}
2866
/* Allocate and create all TX queues and their CQs, binding each CQ to an EQ
 * (round-robin when there are more TXQs than EQs) and setting the netdev XPS
 * map from the EQ's affinity mask.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits for this queue to the CPUs of its EQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2911
2912static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002913{
2914 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002915 struct be_rx_obj *rxo;
2916 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002917
Sathya Perla3abcded2010-10-03 22:12:27 -07002918 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002919 q = &rxo->cq;
2920 if (q->created)
2921 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2922 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002923 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002924}
2925
/* Work out how many RX queues (RSS rings plus an optional default RXQ) the
 * adapter gets, then allocate and create a CQ for each, bound round-robin
 * to the available EQs.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
		min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs share EQs round-robin when rings outnumber EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2967
/* Legacy INTx interrupt handler: count pending events, hand processing off
 * to NAPI, and ack the events without re-arming the EQ.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		/* A real event arrived: reset the spurious-intr counter */
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002998}
2999
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003000static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003001{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003002 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003003
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003004 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00003005 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003006 return IRQ_HANDLED;
3007}
3008
Sathya Perla2e588f82011-03-11 02:49:26 +00003009static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003010{
Somnath Koture38b1702013-05-29 22:55:56 +00003011 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003012}
3013
/* Process up to @budget RX completions on @rxo: discard flush/partial/stray
 * completions, deliver the rest via GRO or the regular path, then ack the CQ
 * and replenish RX buffers if the ring is running low.
 * Returns the number of completions consumed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3073
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303074static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303075{
3076 switch (status) {
3077 case BE_TX_COMP_HDR_PARSE_ERR:
3078 tx_stats(txo)->tx_hdr_parse_err++;
3079 break;
3080 case BE_TX_COMP_NDMA_ERR:
3081 tx_stats(txo)->tx_dma_err++;
3082 break;
3083 case BE_TX_COMP_ACL_ERR:
3084 tx_stats(txo)->tx_spoof_check_err++;
3085 break;
3086 }
3087}
3088
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303089static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303090{
3091 switch (status) {
3092 case LANCER_TX_COMP_LSO_ERR:
3093 tx_stats(txo)->tx_tso_err++;
3094 break;
3095 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3096 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3097 tx_stats(txo)->tx_spoof_check_err++;
3098 break;
3099 case LANCER_TX_COMP_QINQ_ERR:
3100 tx_stats(txo)->tx_qinq_err++;
3101 break;
3102 case LANCER_TX_COMP_PARITY_ERR:
3103 tx_stats(txo)->tx_internal_parity_err++;
3104 break;
3105 case LANCER_TX_COMP_DMA_ERR:
3106 tx_stats(txo)->tx_dma_err++;
3107 break;
3108 }
3109}
3110
/* Reap all available TX completions on @txo (netdev queue index @idx):
 * free the completed wrbs, record any error status, ack the CQ, and wake
 * the netdev subqueue if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* Error-status encodings differ per ASIC family */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003145
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Per-EQ lock helpers arbitrating between NAPI poll and busy-poll.
 * eqo->state tracks who owns the EQ (NAPI, busy-poll, idle) plus yield flags
 * recorded when the other party found the EQ busy.
 */

/* Try to claim the EQ for NAPI; returns false if busy-poll holds it */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		/* record that NAPI had to yield to busy-poll */
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after a NAPI poll */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-poll; returns false if NAPI holds it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		/* record that busy-poll had to yield to NAPI */
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after a busy-poll pass */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until any in-progress busy-poll on the EQ has finished */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queueus.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* No-op stubs when busy polling is compiled out */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3245
/* NAPI poll handler for an EQ: reap TX completions, process RX up to
 * @budget (unless busy-poll owns the EQ), service MCC on its EQ, then
 * either complete NAPI and re-arm the EQ or stay in polling mode.
 * Returns the amount of RX work done (== budget to keep polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	/* Count (and clear) events now; they are acked at the end */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ: claim full budget so NAPI re-polls */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		/* rearm=true: interrupts resume for this EQ */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3294
#ifdef CONFIG_NET_RX_BUSY_POLL
/* ndo_busy_poll handler: process a small batch (up to 4 compls) from the
 * first RX queue on this EQ that has work. Returns the work done, or
 * LL_FLUSH_BUSY if NAPI currently owns the EQ.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3316
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003317void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003318{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003319 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3320 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003321 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303322 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003323
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303324 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003325 return;
3326
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003327 if (lancer_chip(adapter)) {
3328 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3329 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303330 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003331 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303332 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003333 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303334 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303335 /* Do not log error messages if its a FW reset */
3336 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3337 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3338 dev_info(dev, "Firmware update in progress\n");
3339 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303340 dev_err(dev, "Error detected in the card\n");
3341 dev_err(dev, "ERR: sliport status 0x%x\n",
3342 sliport_status);
3343 dev_err(dev, "ERR: sliport error1 0x%x\n",
3344 sliport_err1);
3345 dev_err(dev, "ERR: sliport error2 0x%x\n",
3346 sliport_err2);
3347 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003348 }
3349 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003350 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3351 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3352 ue_lo_mask = ioread32(adapter->pcicfg +
3353 PCICFG_UE_STATUS_LOW_MASK);
3354 ue_hi_mask = ioread32(adapter->pcicfg +
3355 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003356
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003357 ue_lo = (ue_lo & ~ue_lo_mask);
3358 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003359
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303360 /* On certain platforms BE hardware can indicate spurious UEs.
3361 * Allow HW to stop working completely in case of a real UE.
3362 * Hence not setting the hw_error for UE detection.
3363 */
3364
3365 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303366 dev_err(dev, "Error detected in the adapter");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303367 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303368 be_set_error(adapter, BE_ERROR_UE);
3369
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303370 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3371 if (ue_lo & 1)
3372 dev_err(dev, "UE: %s bit set\n",
3373 ue_status_low_desc[i]);
3374 }
3375 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3376 if (ue_hi & 1)
3377 dev_err(dev, "UE: %s bit set\n",
3378 ue_status_hi_desc[i]);
3379 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303380 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003381 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003382}
3383
Sathya Perla8d56ff12009-11-22 22:02:26 +00003384static void be_msix_disable(struct be_adapter *adapter)
3385{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003386 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003387 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003388 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303389 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003390 }
3391}
3392
/* Enable MSI-x with as many vectors as the configuration allows.
 * When RoCE is supported, extra vectors are requested on its behalf and
 * roughly half of the granted vectors are reserved for it.
 * Returns 0 on success or when falling back to INTx is possible; returns
 * the pci_enable_msix_range() error only for VFs (which lack INTx).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	unsigned int i, max_roce_eqs;
	struct device *dev = &adapter->pdev->dev;
	int num_vec;

	/* If RoCE is supported, program the max number of vectors that
	 * could be used for NIC and RoCE, else, just program the number
	 * we'll use initially.
	 */
	if (be_roce_supported(adapter)) {
		max_roce_eqs =
			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
	} else {
		num_vec = max(adapter->cfg_num_rx_irqs,
			      adapter->cfg_num_tx_irqs);
	}

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant fewer vectors than requested, but at least
	 * MIN_MSIX_VECTORS
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Reserve half of the granted vectors for RoCE */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3441
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003442static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303443 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003444{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303445 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003446}
3447
3448static int be_msix_register(struct be_adapter *adapter)
3449{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003450 struct net_device *netdev = adapter->netdev;
3451 struct be_eq_obj *eqo;
3452 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003453
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003454 for_all_evt_queues(adapter, eqo, i) {
3455 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3456 vec = be_msix_vec_get(adapter, eqo);
3457 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003458 if (status)
3459 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003460
3461 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003462 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003463
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003464 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003465err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303466 for (i--; i >= 0; i--) {
3467 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003468 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303469 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003470 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303471 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003472 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003473 return status;
3474}
3475
3476static int be_irq_register(struct be_adapter *adapter)
3477{
3478 struct net_device *netdev = adapter->netdev;
3479 int status;
3480
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003481 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003482 status = be_msix_register(adapter);
3483 if (status == 0)
3484 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003485 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003486 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003487 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003488 }
3489
Sathya Perlae49cc342012-11-27 19:50:02 +00003490 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003491 netdev->irq = adapter->pdev->irq;
3492 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003493 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003494 if (status) {
3495 dev_err(&adapter->pdev->dev,
3496 "INTx request IRQ failed - err %d\n", status);
3497 return status;
3498 }
3499done:
3500 adapter->isr_registered = true;
3501 return 0;
3502}
3503
3504static void be_irq_unregister(struct be_adapter *adapter)
3505{
3506 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003507 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003508 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003509
3510 if (!adapter->isr_registered)
3511 return;
3512
3513 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003514 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003515 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003516 goto done;
3517 }
3518
3519 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003520 for_all_evt_queues(adapter, eqo, i) {
3521 vec = be_msix_vec_get(adapter, eqo);
3522 irq_set_affinity_hint(vec, NULL);
3523 free_irq(vec, eqo);
3524 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003525
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003526done:
3527 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003528}
3529
/* Destroy all RX queues and turn off RSS in FW.
 * For each created RXQ: drain completions/buffers, destroy it in FW, then
 * free the queue memory. Called with RX traffic already quiesced.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	/* Disable RSS in FW if it was enabled */
	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3566
/* Remove the RX filters programmed on the interface in the open path:
 * primary MAC, UC/MC address lists and (on Lancer only) the IFACE flags.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Delete the primary MAC added in be_enable_if_filters() */
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3594
/* ndo_stop handler: quiesce and tear down the data path.
 * Order is significant: filters off -> NAPI off -> MCC events off ->
 * TX drained -> RX queues destroyed -> IRQs synchronized/EQs cleaned ->
 * IRQs unregistered.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no IRQ handler is still running on this EQ
		 * before cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3644
/* Create the RX queues in FW (a default RXQ when needed plus the RSS
 * queues), program the RSS indirection table and hash key, and post the
 * initial receive buffers. Returns 0 on success, else an error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the non-RSS default RXQ only when required */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queue ids until all entries are populated
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not supported on BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3715
Kalesh APbcc84142015-08-05 03:27:48 -04003716static int be_enable_if_filters(struct be_adapter *adapter)
3717{
3718 int status;
3719
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003720 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003721 if (status)
3722 return status;
3723
3724 /* For BE3 VFs, the PF programs the initial MAC address */
3725 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3726 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3727 adapter->if_handle,
3728 &adapter->pmac_id[0], 0);
3729 if (status)
3730 return status;
3731 }
3732
3733 if (adapter->vlans_added)
3734 be_vid_config(adapter);
3735
Sathya Perlab7172412016-07-27 05:26:18 -04003736 __be_set_rx_mode(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003737
3738 return 0;
3739}
3740
/* ndo_open handler: bring the data path up.
 * Creates RX queues, programs filters, registers IRQs, arms the CQs,
 * enables MCC events and NAPI, then starts the TX queues.
 * On any failure the partial setup is undone via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	/* Ask the stack to replay UDP tunnel (VxLAN) port notifications */
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3790
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003791static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3792{
3793 u32 addr;
3794
3795 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3796
3797 mac[5] = (u8)(addr & 0xFF);
3798 mac[4] = (u8)((addr >> 8) & 0xFF);
3799 mac[3] = (u8)((addr >> 16) & 0xFF);
3800 /* Use the OUI from the current MAC address */
3801 memcpy(mac, adapter->netdev->dev_addr, 3);
3802}
3803
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last FW command issued (failures for earlier
 * VFs are logged but do not stop the loop).
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs the MAC via pmac-add; newer chips set it
		 * directly on the VF's interface
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): only the last octet is incremented; it wraps
		 * past 255 VFs without carrying into mac[4] — confirm VF
		 * counts stay below that limit
		 */
		mac[5] += 1;
	}
	return status;
}
3839
Sathya Perla4c876612013-02-03 20:30:11 +00003840static int be_vfs_mac_query(struct be_adapter *adapter)
3841{
3842 int status, vf;
3843 u8 mac[ETH_ALEN];
3844 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003845
3846 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303847 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3848 mac, vf_cfg->if_handle,
3849 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003850 if (status)
3851 return status;
3852 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3853 }
3854 return 0;
3855}
3856
/* Undo SR-IOV setup: delete per-VF MACs and interfaces, disable SR-IOV
 * and free the per-VF state. If VFs are still assigned to VMs, SR-IOV is
 * left enabled and only the driver-side state is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx used pmac-add, so delete via pmac-del; newer chips
		 * clear the MAC on the VF's interface directly
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	/* Restore pass-through port forwarding on BE3 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3890
/* Destroy all transport queues: MCC, RX CQs, TX queues and event queues */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3898
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303899static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003900{
Sathya Perla191eb752012-02-23 18:50:13 +00003901 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3902 cancel_delayed_work_sync(&adapter->work);
3903 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3904 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303905}
3906
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003907static void be_cancel_err_detection(struct be_adapter *adapter)
3908{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303909 struct be_error_recovery *err_rec = &adapter->error_recovery;
3910
3911 if (!be_err_recovery_workq)
3912 return;
3913
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003914 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303915 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003916 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3917 }
3918}
3919
/* Undo VxLAN offload configuration: revert the iface to normal (non-
 * tunnel) mode, clear the VxLAN port in FW, and strip the tunnel offload
 * feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
3938
/* Compute the per-VF resource template (@vft_res) used when enabling
 * @num_vfs VFs: queue counts, IFACE capability flags, unicast MACs,
 * VLANs, interface and MCCQ counts. Resources are split between the PF
 * and its VFs; individual fields are only distributed when FW reports
 * them as modifiable via GET_PROFILE_CONFIG.
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and it's VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than it's PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	/* Strip VLAN-promiscuous from VFs when the flag is modifiable */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and it's VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
4014
Sathya Perlab7172412016-07-27 05:26:18 -04004015static void be_if_destroy(struct be_adapter *adapter)
4016{
4017 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4018
4019 kfree(adapter->pmac_id);
4020 adapter->pmac_id = NULL;
4021
4022 kfree(adapter->mc_list);
4023 adapter->mc_list = NULL;
4024
4025 kfree(adapter->uc_list);
4026 adapter->uc_list = NULL;
4027}
4028
/* Undo what be_setup() created: VFs, VXLAN offloads, the IFACE, queues
 * and MSI-X.  On Skyhawk PFs with no VFs currently assigned, it also
 * tells FW to spread the PF-pool resources across the max supported VF
 * count so a later load starts from an evenly provisioned state.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	/* Stop the periodic worker and drain any work already queued
	 * before dismantling the structures it touches.
	 */
	be_cancel_worker(adapter);

	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	/* Destroys the IFACE and frees the uc/mc/pmac filter tables */
	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
4064
/* Create an IFACE on behalf of each VF (proxy if_create issued by the
 * PF, FW domain vf + 1).  On non-BE3 chips the capability flags are
 * taken from the per-VF FW profile when available; VLAN promiscuous
 * capability is always stripped so a VF cannot enable it.
 * Returns 0, or the status of the first failing FW command.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4100
Sathya Perla39f1d942012-05-08 19:41:24 +00004101static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004102{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004103 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004104 int vf;
4105
Sathya Perla39f1d942012-05-08 19:41:24 +00004106 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4107 GFP_KERNEL);
4108 if (!adapter->vf_cfg)
4109 return -ENOMEM;
4110
Sathya Perla11ac75e2011-12-13 00:58:50 +00004111 for_all_vfs(adapter, vf_cfg, vf) {
4112 vf_cfg->if_handle = -1;
4113 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004114 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004115 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004116}
4117
/* Bring up SR-IOV.  If VFs survived a previous driver load, rediscover
 * their IFACE ids and MACs; otherwise create IFACEs and program MACs.
 * Then, per VF: grant the FILTMGMT privilege where missing, cache the
 * spoof-check state, and for freshly created VFs configure QoS and
 * link-state and enable them in FW.  SR-IOV is enabled at the PCI level
 * only for fresh VFs, and on BE3 the embedded bridge is switched to VEB
 * mode.  On any failure all VF state is undone via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist (e.g. driver reload): query the state
		 * FW already holds instead of re-creating it.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the spoof-check setting FW reports for this VF */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf + 1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4210
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304211/* Converting function_mode bits on BE3 to SH mc_type enums */
4212
4213static u8 be_convert_mc_type(u32 function_mode)
4214{
Suresh Reddy66064db2014-06-23 16:41:29 +05304215 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304216 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304217 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304218 return FLEX10;
4219 else if (function_mode & VNIC_MODE)
4220 return vNIC2;
4221 else if (function_mode & UMC_ENABLED)
4222 return UMC;
4223 else
4224 return MC_NONE;
4225}
4226
/* On BE2/BE3 FW does not suggest the supported limits, so derive them
 * here from chip type, SR-IOV state, multi-channel mode and function
 * capabilities.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS only on a non-SRIOV, RSS-capable PF; otherwise max_rss_qs
	 * keeps the 0 it was initialized with by the caller.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* one extra RXQ beyond the RSS queues for the default queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4298
Sathya Perla30128032011-11-10 19:17:57 +00004299static void be_setup_init(struct be_adapter *adapter)
4300{
4301 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004302 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004303 adapter->if_handle = -1;
4304 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004305 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304306 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004307 if (be_physfn(adapter))
4308 adapter->cmd_privileges = MAX_PRIVILEGES;
4309 else
4310 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004311}
4312
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * number of VFs for which RSS can be enabled.
 */
void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	/* Port-wide limits come from the saved (factory) profile */
	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Tables not consumed by the NIC PFs remain for VF distribution */
	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4337
/* Populate adapter->pool_res with the PF-pool (SR-IOV) limits reported
 * by FW.  Works around old BE3 FW that does not report max_vfs, and
 * trusts the pci-dev TotalVFs value instead of the pool limits when VFs
 * survived a previous driver unload.  Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4376
/* Fetch the SR-IOV limits and, on Skyhawk PFs loaded with no
 * pre-existing VFs, ask FW to (re)distribute the PF-pool resources.
 * A failure to optimize is only logged, not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4402
/* Discover this function's queue/filter limits (from FW, or derived on
 * BE2/BE3) into adapter->res and configure the initial RX/TX interrupt
 * counts from them.  Returns 0 or a FW command status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4452
/* One-time query of controller/FW configuration at setup time:
 * controller attributes, FW config, FAT dump length (PF on non-Lancer),
 * FW log level (BEx only), ACPI WoL capability (and PCI wake state),
 * port name and the active FW profile (PF only).
 * Returns 0, or the status of the first failing mandatory query.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	/* Propagate the configured WoL setting to the PCI wake state */
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4490
Sathya Perla95046b92013-07-23 15:25:02 +05304491static int be_mac_setup(struct be_adapter *adapter)
4492{
4493 u8 mac[ETH_ALEN];
4494 int status;
4495
4496 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4497 status = be_cmd_get_perm_mac(adapter, mac);
4498 if (status)
4499 return status;
4500
4501 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4502 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304503 }
4504
Sathya Perla95046b92013-07-23 15:25:02 +05304505 return 0;
4506}
4507
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304508static void be_schedule_worker(struct be_adapter *adapter)
4509{
Sathya Perlab7172412016-07-27 05:26:18 -04004510 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304511 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4512}
4513
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304514static void be_destroy_err_recovery_workq(void)
4515{
4516 if (!be_err_recovery_workq)
4517 return;
4518
4519 flush_workqueue(be_err_recovery_workq);
4520 destroy_workqueue(be_err_recovery_workq);
4521 be_err_recovery_workq = NULL;
4522}
4523
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304524static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004525{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304526 struct be_error_recovery *err_rec = &adapter->error_recovery;
4527
4528 if (!be_err_recovery_workq)
4529 return;
4530
4531 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4532 msecs_to_jiffies(delay));
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004533 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4534}
4535
/* Create all I/O objects in dependency order (event queues first, then
 * TX queues, RX completion queues and MCC queues) and publish the
 * resulting queue counts to the network stack.
 * Returns 0, or the status of the first failing step.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4570
Ajit Khaparde62219062016-02-10 22:45:53 +05304571static int be_if_create(struct be_adapter *adapter)
4572{
4573 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4574 u32 cap_flags = be_if_cap_flags(adapter);
4575 int status;
4576
Sathya Perlab7172412016-07-27 05:26:18 -04004577 /* alloc required memory for other filtering fields */
4578 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4579 sizeof(*adapter->pmac_id), GFP_KERNEL);
4580 if (!adapter->pmac_id)
4581 return -ENOMEM;
4582
4583 adapter->mc_list = kcalloc(be_max_mc(adapter),
4584 sizeof(*adapter->mc_list), GFP_KERNEL);
4585 if (!adapter->mc_list)
4586 return -ENOMEM;
4587
4588 adapter->uc_list = kcalloc(be_max_uc(adapter),
4589 sizeof(*adapter->uc_list), GFP_KERNEL);
4590 if (!adapter->uc_list)
4591 return -ENOMEM;
4592
Sathya Perlae2617682016-06-22 08:54:54 -04004593 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304594 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4595
4596 en_flags &= cap_flags;
4597 /* will enable all the needed filter flags in be_open() */
4598 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4599 &adapter->if_handle, 0);
4600
Sathya Perlab7172412016-07-27 05:26:18 -04004601 if (status)
4602 return status;
4603
4604 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304605}
4606
/* Re-create the IFACE and all queues after a resource re-configuration:
 * close the netdev if running, quiesce the worker, destroy and
 * re-create IFACE + queues (re-programming MSI-X only when no vectors
 * are shared with RoCE), then restart the worker and re-open the
 * netdev.  Returns 0 or the first failing status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4649
/* Extract the leading major number from a "X.Y.Z"-style firmware
 * version string; returns 0 when the string does not begin with an
 * integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	return sscanf(fw_ver, "%d.", &major) == 1 ? major : 0;
}
4660
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304661/* If it is error recovery, FLR the PF
4662 * Else if any VFs are already enabled don't FLR the PF
4663 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004664static bool be_reset_required(struct be_adapter *adapter)
4665{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304666 if (be_error_recovering(adapter))
4667 return true;
4668 else
4669 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004670}
4671
/* Wait for the FW to be ready and perform the required initialization:
 * clear recorded error state, FLR the function when needed, and tell FW
 * the driver is ready before enabling interrupts for other ULPs.
 * Returns 0 or the status of the first failing step.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4703
Sathya Perla5fb379e2009-06-18 00:02:59 +00004704static int be_setup(struct be_adapter *adapter)
4705{
Sathya Perla39f1d942012-05-08 19:41:24 +00004706 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004707 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004708
Sathya Perlaf962f842015-02-23 04:20:16 -05004709 status = be_func_init(adapter);
4710 if (status)
4711 return status;
4712
Sathya Perla30128032011-11-10 19:17:57 +00004713 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004714
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004715 if (!lancer_chip(adapter))
4716 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004717
Suresh Reddy980df242015-12-30 01:29:03 -05004718 /* invoke this cmd first to get pf_num and vf_num which are needed
4719 * for issuing profile related cmds
4720 */
4721 if (!BEx_chip(adapter)) {
4722 status = be_cmd_get_func_config(adapter, NULL);
4723 if (status)
4724 return status;
4725 }
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004726
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004727 status = be_get_config(adapter);
4728 if (status)
4729 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004730
Somnath Koturde2b1e02016-06-06 07:22:10 -04004731 if (!BE2_chip(adapter) && be_physfn(adapter))
4732 be_alloc_sriov_res(adapter);
4733
4734 status = be_get_resources(adapter);
4735 if (status)
4736 goto err;
4737
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004738 status = be_msix_enable(adapter);
4739 if (status)
4740 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004741
Kalesh APbcc84142015-08-05 03:27:48 -04004742 /* will enable all the needed filter flags in be_open() */
Ajit Khaparde62219062016-02-10 22:45:53 +05304743 status = be_if_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004744 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004745 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004746
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304747 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4748 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304749 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304750 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004751 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004752 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004753
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004754 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004755
Sathya Perla95046b92013-07-23 15:25:02 +05304756 status = be_mac_setup(adapter);
4757 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004758 goto err;
4759
Kalesh APe97e3cd2014-07-17 16:20:26 +05304760 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304761 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004762
Somnath Koture9e2a902013-10-24 14:37:53 +05304763 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304764 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304765 adapter->fw_ver);
4766 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4767 }
4768
Kalesh AP00d594c2015-01-20 03:51:44 -05004769 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4770 adapter->rx_fc);
4771 if (status)
4772 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4773 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004774
Kalesh AP00d594c2015-01-20 03:51:44 -05004775 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4776 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004777
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304778 if (be_physfn(adapter))
4779 be_cmd_set_logical_link_config(adapter,
4780 IFLA_VF_LINK_STATE_AUTO, 0);
4781
Somnath Kotur884476b2016-06-22 08:54:55 -04004782 /* BE3 EVB echoes broadcast/multicast packets back to PF's vport
4783 * confusing a linux bridge or OVS that it might be connected to.
4784 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4785 * when SRIOV is not enabled.
4786 */
4787 if (BE3_chip(adapter))
4788 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4789 PORT_FWD_TYPE_PASSTHRU, 0);
4790
Vasundhara Volambec84e62014-06-30 13:01:32 +05304791 if (adapter->num_vfs)
4792 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004793
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004794 status = be_cmd_get_phy_info(adapter);
4795 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004796 adapter->phy.fc_autoneg = 1;
4797
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304798 if (be_physfn(adapter) && !lancer_chip(adapter))
4799 be_cmd_set_features(adapter);
4800
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304801 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304802 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004803 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004804err:
4805 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004806 return status;
4807}
4808
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll all event queues with interrupts disabled (netconsole path).
 * Re-arms each EQ and kicks its NAPI context so pending completions
 * get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eq;
	int idx;

	for_all_evt_queues(adapter, eq, idx) {
		be_eq_notify(eq->adapter, eq->q.id, false, true, 0, 0);
		napi_schedule(&eq->napi);
	}
}
#endif
4822
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004823int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4824{
4825 const struct firmware *fw;
4826 int status;
4827
4828 if (!netif_running(adapter->netdev)) {
4829 dev_err(&adapter->pdev->dev,
4830 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304831 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004832 }
4833
4834 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4835 if (status)
4836 goto fw_exit;
4837
4838 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4839
4840 if (lancer_chip(adapter))
4841 status = lancer_fw_download(adapter, fw);
4842 else
4843 status = be_fw_download(adapter, fw);
4844
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004845 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304846 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004847
Ajit Khaparde84517482009-09-04 03:12:16 +00004848fw_exit:
4849 release_firmware(fw);
4850 return status;
4851}
4852
/* ndo_bridge_setlink handler: program the embedded switch (HSW) into
 * VEB or VEPA port-forwarding mode from an IFLA_AF_SPEC netlink request.
 * Only supported when SR-IOV is enabled. Returns 0 on success or a
 * negative errno.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Attribute payload must hold at least a u16 mode value */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		/* BE3 silicon cannot do VEPA forwarding */
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honoured */
		return status;
	}
	/* NOTE: if no IFLA_BRIDGE_MODE attribute was present, the loop
	 * falls through here with status == 0 and mode == 0 and the
	 * error message below is still printed (reporting "VEB").
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4902
4903static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004904 struct net_device *dev, u32 filter_mask,
4905 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004906{
4907 struct be_adapter *adapter = netdev_priv(dev);
4908 int status = 0;
4909 u8 hsw_mode;
4910
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004911 /* BE and Lancer chips support VEB mode only */
4912 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Ivan Vecera84317062016-02-11 12:42:26 +01004913 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4914 if (!pci_sriov_get_totalvfs(adapter->pdev))
4915 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004916 hsw_mode = PORT_FWD_TYPE_VEB;
4917 } else {
4918 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004919 adapter->if_handle, &hsw_mode,
4920 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004921 if (status)
4922 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04004923
4924 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4925 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004926 }
4927
4928 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4929 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004930 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004931 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004932}
4933
Sathya Perlab7172412016-07-27 05:26:18 -04004934static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4935 void (*func)(struct work_struct *))
4936{
4937 struct be_cmd_work *work;
4938
4939 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4940 if (!work) {
4941 dev_err(&adapter->pdev->dev,
4942 "be_work memory allocation failed\n");
4943 return NULL;
4944 }
4945
4946 INIT_WORK(&work->work, func);
4947 work->adapter = adapter;
4948 return work;
4949}
4950
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004951/* VxLAN offload Notes:
4952 *
4953 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4954 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4955 * is expected to work across all types of IP tunnels once exported. Skyhawk
4956 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304957 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4958 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4959 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004960 *
4961 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4962 * adds more than one port, disable offloads and don't re-enable them again
4963 * until after all the tunnels are removed.
4964 */
/* Deferred-work handler for a VxLAN port addition.
 * Converts the interface to tunnel mode, programs the VxLAN UDP port
 * in HW and exports the tunnel offload features. Per the notes above,
 * only one port is supported: a second distinct port disables
 * offloads entirely. Frees the work item on all paths.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	int status;

	/* Re-add of the already-offloaded port: just track the alias */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		goto done;
	}

	/* A different port while offloads are active: HW can offload only
	 * one VxLAN UDP port, so turn offloads off until ports drop back
	 * to one (count is still bumped to track the add).
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already found unusable for multiple ports; just count */
	if (adapter->vxlan_port_count++ >= 1)
		goto done;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that HW is programmed */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	goto done;
err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}
5020
/* Deferred-work handler for a VxLAN port removal.
 * Disables offloads only when the last alias of the offloaded port
 * goes away; deletions of other (never-offloaded) ports just drop the
 * port count. Frees the work item on all paths.
 */
static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	/* Not the offloaded port: only the count needs adjusting */
	if (adapter->vxlan_port != port)
		goto done;

	/* Offloaded port still has other users; note: the alias path
	 * intentionally skips the vxlan_port_count decrement below
	 */
	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}
5046
5047static void be_cfg_vxlan_port(struct net_device *netdev,
5048 struct udp_tunnel_info *ti,
5049 void (*func)(struct work_struct *))
5050{
5051 struct be_adapter *adapter = netdev_priv(netdev);
5052 struct be_cmd_work *cmd_work;
5053
5054 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5055 return;
5056
5057 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5058 return;
5059
5060 cmd_work = be_alloc_work(adapter, func);
5061 if (cmd_work) {
5062 cmd_work->info.vxlan_port = ti->port;
5063 queue_work(be_wq, &cmd_work->work);
5064 }
5065}
5066
/* ndo_udp_tunnel_del: queue deferred removal of a VxLAN UDP port */
static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}
5072
/* ndo_udp_tunnel_add: queue deferred addition of a VxLAN UDP port */
static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
Joe Stringer725d5482014-11-13 16:38:13 -08005078
Jesse Gross5f352272014-12-23 22:37:26 -08005079static netdev_features_t be_features_check(struct sk_buff *skb,
5080 struct net_device *dev,
5081 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005082{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305083 struct be_adapter *adapter = netdev_priv(dev);
5084 u8 l4_hdr = 0;
5085
5086 /* The code below restricts offload features for some tunneled packets.
5087 * Offload features for normal (non tunnel) packets are unchanged.
5088 */
5089 if (!skb->encapsulation ||
5090 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5091 return features;
5092
5093 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5094 * should disable tunnel offload features if it's not a VxLAN packet,
5095 * as tunnel offloads have been enabled only for VxLAN. This is done to
5096 * allow other tunneled traffic like GRE work fine while VxLAN
5097 * offloads are configured in Skyhawk-R.
5098 */
5099 switch (vlan_get_protocol(skb)) {
5100 case htons(ETH_P_IP):
5101 l4_hdr = ip_hdr(skb)->protocol;
5102 break;
5103 case htons(ETH_P_IPV6):
5104 l4_hdr = ipv6_hdr(skb)->nexthdr;
5105 break;
5106 default:
5107 return features;
5108 }
5109
5110 if (l4_hdr != IPPROTO_UDP ||
5111 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5112 skb->inner_protocol != htons(ETH_P_TEB) ||
5113 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5114 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
Tom Herberta1882222015-12-14 11:19:43 -08005115 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305116
5117 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005118}
Sathya Perlac9c47142014-03-27 10:46:19 +05305119
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305120static int be_get_phys_port_id(struct net_device *dev,
5121 struct netdev_phys_item_id *ppid)
5122{
5123 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5124 struct be_adapter *adapter = netdev_priv(dev);
5125 u8 *id;
5126
5127 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5128 return -ENOSPC;
5129
5130 ppid->id[0] = adapter->hba_port_num + 1;
5131 id = &ppid->id[1];
5132 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5133 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5134 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5135
5136 ppid->id_len = id_len;
5137
5138 return 0;
5139}
5140
Sathya Perlab7172412016-07-27 05:26:18 -04005141static void be_set_rx_mode(struct net_device *dev)
5142{
5143 struct be_adapter *adapter = netdev_priv(dev);
5144 struct be_cmd_work *work;
5145
5146 work = be_alloc_work(adapter, be_work_set_rx_mode);
5147 if (work)
5148 queue_work(be_wq, &work->work);
5149}
5150
/* net_device_ops for all benet devices (PF and VF) */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
	.ndo_udp_tunnel_add	= be_add_vxlan_port,
	.ndo_udp_tunnel_del	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
	.ndo_get_phys_port_id	= be_get_phys_port_id,
};
5181
/* One-time net_device initialization: advertise offload features,
 * flags, GSO limits and hook up netdev/ethtool ops.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* hw_features must be populated before it is folded into
	 * netdev->features below
	 */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing only when the interface capabilities include RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* GSO payload limit leaves room for the Ethernet header */
	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5208
/* Quiesce the device for suspend/error recovery: detach from the
 * stack and close under rtnl_lock, then tear down adapter resources.
 * Mirrored by be_resume().
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* rtnl_lock serializes against concurrent open/close/setlink */
	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5221
/* Re-initialize the adapter after be_cleanup(): run be_setup, reopen
 * the interface if it was running, and re-attach to the stack.
 * Returns 0 on success or a negative errno.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	/* open must run under rtnl_lock, like the cleanup-side close */
	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
5243
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305244static void be_soft_reset(struct be_adapter *adapter)
5245{
5246 u32 val;
5247
5248 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5249 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5250 val |= SLIPORT_SOFTRESET_SR_MASK;
5251 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5252}
5253
/* Decide whether the current recoverable-TPE error meets the recovery
 * criteria. Recovery is refused when: the POST stage does not report a
 * recoverable error, the driver loaded too recently, the previous
 * recovery was too recent, or the same error code repeats back to back.
 * On success, records the recovery timestamp and error code.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	/* unsigned jiffies subtraction is wraparound-safe */
	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	/* Two consecutive identical TPE error codes are treated as a
	 * persistent fault that recovery cannot clear
	 */
	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5299
/* One step of the BEx/Skyhawk TPE (recoverable hardware error)
 * recovery state machine. Each call advances recovery_state and sets
 * resched_delay for the caller (be_err_detection_task) to wait before
 * the next step. Returns -EAGAIN while recovery is still in progress,
 * 0 when the chip is ready for re-initialization, or a fatal negative
 * errno (with resched_delay cleared) when recovery is impossible.
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		/* Start detection: give the UE time to be latched */
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* Non-PF0 functions skip the reset step and just wait for
		 * the chip to come back
		 */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;		/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5370
/* Attempt full adapter recovery after a detected HW error.
 * On non-Lancer chips this first drives the TPE recovery state machine
 * (which may return -EAGAIN to be retried by the caller), then waits
 * for FW readiness, tears the adapter down and re-initializes it.
 * Returns 0 on successful recovery or a negative errno.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	if (!lancer_chip(adapter)) {
		/* TPE recovery must be both supported and not
		 * administratively disabled
		 */
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	/* Flag is cleared again only when recovery fully succeeds */
	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}
5404
/* Periodic error-detection worker. Polls for HW errors and, when one
 * is found, drives be_err_recover(), rescheduling itself with a delay
 * chosen per chip family and recovery outcome. Only the final
 * unrecoverable branch stops rescheduling (adapter requires a reboot).
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		/* Recovered: reset the state machine for the next error */
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check if PF have allocated resources
		 * every second.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	/* Unrecoverable: do not reschedule */
	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}
5460
Vasundhara Volam21252372015-02-06 08:18:42 -05005461static void be_log_sfp_info(struct be_adapter *adapter)
5462{
5463 int status;
5464
5465 status = be_cmd_query_sfp_info(adapter);
5466 if (!status) {
5467 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305468 "Port %c: %s Vendor: %s part no: %s",
5469 adapter->port_name,
5470 be_misconfig_evt_port_state[adapter->phy_state],
5471 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005472 adapter->phy.vendor_pn);
5473 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305474 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005475}
5476
/* Periodic (1 second) housekeeping task: reaps MCC completions while
 * interrupts are disabled, fires stats requests, replenishes starved RX
 * queues, updates EQ delay and logs SFP misconfiguration events.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* Die-temperature query is issued by the PF only, once every
	 * be_get_temp_freq iterations (must be a power of 2 — see MODULO)
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only when the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5525
Sathya Perla78fad34e2015-02-23 04:20:08 -05005526static void be_unmap_pci_bars(struct be_adapter *adapter)
5527{
5528 if (adapter->csr)
5529 pci_iounmap(adapter->pdev, adapter->csr);
5530 if (adapter->db)
5531 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005532 if (adapter->pcicfg && adapter->pcicfg_mapped)
5533 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005534}
5535
/* PCI BAR index that holds the doorbell registers for this chip/function */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5543
5544static int be_roce_map_pci_bars(struct be_adapter *adapter)
5545{
5546 if (skyhawk_chip(adapter)) {
5547 adapter->roce_db.size = 4096;
5548 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5549 db_bar(adapter));
5550 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5551 db_bar(adapter));
5552 }
5553 return 0;
5554}
5555
/* ioremap the BARs used by this function: CSR (BEx PF only), doorbell,
 * and PCICFG (BE2/BE3/Skyhawk).  On a VF, PCICFG is reached through an
 * offset within the doorbell mapping instead of a separate BAR.
 * Returns 0 or -ENOMEM; unmaps everything on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* SLI_INTF register tells us the ASIC family and PF/VF-ness */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			/* pcicfg_mapped tells be_unmap_pci_bars() whether
			 * this pointer was obtained from pci_iomap()
			 */
			adapter->pcicfg_mapped = true;
		} else {
			/* VF: pcicfg aliases into the doorbell mapping */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5600
5601static void be_drv_cleanup(struct be_adapter *adapter)
5602{
5603 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5604 struct device *dev = &adapter->pdev->dev;
5605
5606 if (mem->va)
5607 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5608
5609 mem = &adapter->rx_filter;
5610 if (mem->va)
5611 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5612
5613 mem = &adapter->stats_cmd;
5614 if (mem->va)
5615 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5616}
5617
/* Allocate and initialize various fields in be_adapter struct:
 * DMA buffers for the mailbox, RX filter and stats commands, plus the
 * locks, completions and delayed-work items the driver uses.
 * Returns 0 or -ENOMEM, freeing partial allocations on failure.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Mailbox must be 16-byte aligned: over-allocate by 16 and derive
	 * the aligned view (mbox_mem_align) from the raw allocation below
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request layout differs per ASIC generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	/* Error-recovery state machine starts idle */
	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5692
/* PCI remove callback: tear the adapter down in reverse order of probe */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* Do not reset the function while VFs are assigned to guests */
	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5725
Arnd Bergmann9a032592015-05-18 23:06:45 +02005726static ssize_t be_hwmon_show_temp(struct device *dev,
5727 struct device_attribute *dev_attr,
5728 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305729{
5730 struct be_adapter *adapter = dev_get_drvdata(dev);
5731
5732 /* Unit: millidegree Celsius */
5733 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5734 return -EIO;
5735 else
5736 return sprintf(buf, "%u\n",
5737 adapter->hwmon_info.be_on_die_temp * 1000);
5738}
5739
/* hwmon: a single read-only temperature channel (temp1_input) */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

/* Generates be_hwmon_groups for devm_hwmon_device_register_with_groups() */
ATTRIBUTE_GROUPS(be_hwmon);
5749
Sathya Perlad3791422012-09-28 04:39:44 +00005750static char *mc_name(struct be_adapter *adapter)
5751{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305752 char *str = ""; /* default */
5753
5754 switch (adapter->mc_type) {
5755 case UMC:
5756 str = "UMC";
5757 break;
5758 case FLEX10:
5759 str = "FLEX10";
5760 break;
5761 case vNIC1:
5762 str = "vNIC-1";
5763 break;
5764 case nPAR:
5765 str = "nPAR";
5766 break;
5767 case UFP:
5768 str = "UFP";
5769 break;
5770 case vNIC2:
5771 str = "vNIC-2";
5772 break;
5773 default:
5774 str = "";
5775 }
5776
5777 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005778}
5779
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5784
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005785static inline char *nic_name(struct pci_dev *pdev)
5786{
5787 switch (pdev->device) {
5788 case OC_DEVICE_ID1:
5789 return OC_NAME;
5790 case OC_DEVICE_ID2:
5791 return OC_NAME_BE;
5792 case OC_DEVICE_ID3:
5793 case OC_DEVICE_ID4:
5794 return OC_NAME_LANCER;
5795 case BE_DEVICE_ID2:
5796 return BE3_NAME;
5797 case OC_DEVICE_ID5:
5798 case OC_DEVICE_ID6:
5799 return OC_NAME_SH;
5800 default:
5801 return BE_NAME;
5802 }
5803}
5804
/* PCI probe callback: enable the device, set up DMA masks, map BARs,
 * allocate driver state, bring the adapter up and register the netdev.
 * Error paths unwind in reverse order via the labels at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to a 32-bit mask */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER support is optional; probe continues either way */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	/* Record when this function came up, for the recovery logic */
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5901
/* Legacy PM suspend: quiesce the adapter and put the device into the
 * requested low-power state.  Mirrored by be_pci_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5916
/* Legacy PM resume: re-enable the device, restore config space and
 * bring the adapter back up; restarts the error-detection task that
 * be_suspend() cancelled.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
5936
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Shutdown may race with a failed or aborted probe */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset stops all DMA before power-off/reboot */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5957
/* EEH callback: a PCI channel error was detected.  Quiesce the driver
 * and tell the EEH core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* Guard against re-entry while already handling an EEH error */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5991
/* EEH callback: slot reset is done; re-enable the device and wait for
 * the firmware to become ready before declaring recovery.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6017
/* EEH callback: traffic may flow again; bring the adapter back up and
 * restart the error-detection task.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6038
/* sysfs sriov_numvfs handler: enable num_vfs VFs (or disable all when
 * num_vfs is 0).  Returns the number of VFs enabled or a -ve errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* NOTE(review): this assigned-VF check runs after be_vf_clear()
	 * above has already executed for num_vfs == 0 — presumably
	 * be_vf_clear() is safe with assigned VFs; confirm.
	 */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6093
/* PCI error-recovery (EEH/AER) callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6099
/* PCI driver descriptor registered from be_init_module() */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6111
6112static int __init be_init_module(void)
6113{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306114 int status;
6115
Joe Perches8e95a202009-12-03 07:58:21 +00006116 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6117 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006118 printk(KERN_WARNING DRV_NAME
6119 " : Module param rx_frag_size must be 2048/4096/8192."
6120 " Using 2048\n");
6121 rx_frag_size = 2048;
6122 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006123
Vasundhara Volamace40af2015-03-04 00:44:34 -05006124 if (num_vfs > 0) {
6125 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6126 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6127 }
6128
Sathya Perlab7172412016-07-27 05:26:18 -04006129 be_wq = create_singlethread_workqueue("be_wq");
6130 if (!be_wq) {
6131 pr_warn(DRV_NAME "workqueue creation failed\n");
6132 return -1;
6133 }
6134
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306135 be_err_recovery_workq =
6136 create_singlethread_workqueue("be_err_recover");
6137 if (!be_err_recovery_workq)
6138 pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6139
6140 status = pci_register_driver(&be_driver);
6141 if (status) {
6142 destroy_workqueue(be_wq);
6143 be_destroy_err_recovery_workq();
6144 }
6145 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006146}
6147module_init(be_init_module);
6148
/* Module unload: unregister the PCI driver first (so all per-device
 * work is cancelled via be_remove), then tear down the workqueues.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	/* be_wq creation failure aborts init, but stay defensive */
	if (be_wq)
		destroy_workqueue(be_wq);
}
6158module_exit(be_exit_module);