blob: 860104711ce7be74df90b08ede5916d5aa56c4bd [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI device IDs claimed by this driver.  BE_* IDs are BladeEngine parts,
 * OC_* IDs are OneConnect parts; the { 0 } entry terminates the table.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the UE (Unrecoverable Error) Status
 * Low CSR; the array index corresponds to the bit position in the register.
 * NOTE(review): trailing spaces in some entries appear intentional (kept
 * byte-identical) — presumably for aligned log output; confirm before
 * normalizing.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
/* Bit-position names for the UE Status High CSR; index == bit number.
 * The final "Unknown" entry catches bits beyond the named range.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -0500128#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
129 BE_IF_FLAGS_BROADCAST | \
130 BE_IF_FLAGS_MULTICAST | \
131 BE_IF_FLAGS_PASS_L3L4_ERRORS)
132
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
134{
135 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530136
Sathya Perla1cfafab2012-02-23 18:50:15 +0000137 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000138 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
139 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000140 mem->va = NULL;
141 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142}
143
144static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530145 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146{
147 struct be_dma_mem *mem = &q->dma_mem;
148
149 memset(q, 0, sizeof(*q));
150 q->len = len;
151 q->entry_size = entry_size;
152 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700153 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
154 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000156 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157 return 0;
158}
159
Somnath Kotur68c45a22013-03-14 02:42:07 +0000160static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161{
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163
Sathya Perladb3ea782011-08-22 19:41:52 +0000164 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530165 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000174
Sathya Perladb3ea782011-08-22 19:41:52 +0000175 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530176 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177}
178
Somnath Kotur68c45a22013-03-14 02:42:07 +0000179static void be_intr_set(struct be_adapter *adapter, bool enable)
180{
181 int status = 0;
182
183 /* On lancer interrupts can't be controlled via this register */
184 if (lancer_chip(adapter))
185 return;
186
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530187 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188 return;
189
190 status = be_cmd_intr_set(adapter, enable);
191 if (status)
192 be_reg_intr_set(adapter, enable);
193}
194
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196{
197 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530198
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530199 if (be_check_error(adapter, BE_ERROR_HW))
200 return;
201
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= qid & DB_RQ_RING_ID_MASK;
203 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000204
205 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000206 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207}
208
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
210 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211{
212 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530213
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530214 if (be_check_error(adapter, BE_ERROR_HW))
215 return;
216
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000217 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000219
220 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000221 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222}
223
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400225 bool arm, bool clear_int, u16 num_popped,
226 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227{
228 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530229
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530231 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000232
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530233 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000234 return;
235
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236 if (arm)
237 val |= 1 << DB_EQ_REARM_SHIFT;
238 if (clear_int)
239 val |= 1 << DB_EQ_CLR_SHIFT;
240 val |= 1 << DB_EQ_EVNT_SHIFT;
241 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400242 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000243 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244}
245
Sathya Perla8788fdc2009-07-27 22:52:03 +0000246void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247{
248 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530249
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000251 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
252 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000253
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530254 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000255 return;
256
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700257 if (arm)
258 val |= 1 << DB_CQ_REARM_SHIFT;
259 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000260 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700261}
262
/* ndo_set_mac_address handler.  Programs the new MAC into the FW (when the
 * interface is running), deletes the previously programmed MAC, and then
 * verifies with the FW that the new MAC actually took effect before
 * updating netdev->dev_addr.
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM when
 * the FW rejected the change (e.g. insufficient VF privilege), or another
 * negative errno from the FW query.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	/* remember the current pmac handle so the old MAC can be deleted
	 * after the new one is successfully added
	 */
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
327
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328/* BE2 supports only v0 cmd */
329static void *hw_stats_from_cmd(struct be_adapter *adapter)
330{
331 if (BE2_chip(adapter)) {
332 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
333
334 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500335 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000336 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
337
338 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500339 } else {
340 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
341
342 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000343 }
344}
345
346/* BE2 supports only v0 cmd */
347static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
348{
349 if (BE2_chip(adapter)) {
350 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
351
352 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500353 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000354 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
355
356 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500357 } else {
358 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
359
360 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000361 }
362}
363
364static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000366 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
367 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
368 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000370 &rxf_stats->port[adapter->port_num];
371 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000372
Sathya Perlaac124ff2011-07-25 19:10:14 +0000373 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_pause_frames = port_stats->rx_pause_frames;
375 drvs->rx_crc_errors = port_stats->rx_crc_errors;
376 drvs->rx_control_frames = port_stats->rx_control_frames;
377 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
378 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
379 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
380 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
381 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
382 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
383 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
384 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
385 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
386 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
387 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_dropped_header_too_small =
390 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000391 drvs->rx_address_filtered =
392 port_stats->rx_address_filtered +
393 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394 drvs->rx_alignment_symbol_errors =
395 port_stats->rx_alignment_symbol_errors;
396
397 drvs->tx_pauseframes = port_stats->tx_pauseframes;
398 drvs->tx_controlframes = port_stats->tx_controlframes;
399
400 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000401 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000404 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->forwarded_packets = rxf_stats->forwarded_packets;
407 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000408 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
409 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000410 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
411}
412
Sathya Perlaca34fe32012-11-06 17:48:56 +0000413static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000415 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
416 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
417 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000418 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 &rxf_stats->port[adapter->port_num];
420 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421
Sathya Perlaac124ff2011-07-25 19:10:14 +0000422 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000423 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
424 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->rx_pause_frames = port_stats->rx_pause_frames;
426 drvs->rx_crc_errors = port_stats->rx_crc_errors;
427 drvs->rx_control_frames = port_stats->rx_control_frames;
428 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
429 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
430 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
431 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
432 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
433 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
434 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
435 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
436 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
437 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
438 drvs->rx_dropped_header_too_small =
439 port_stats->rx_dropped_header_too_small;
440 drvs->rx_input_fifo_overflow_drop =
441 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000442 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000443 drvs->rx_alignment_symbol_errors =
444 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000445 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 drvs->tx_pauseframes = port_stats->tx_pauseframes;
447 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000448 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000449 drvs->jabber_events = port_stats->jabber_events;
450 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000451 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000452 drvs->forwarded_packets = rxf_stats->forwarded_packets;
453 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000454 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
455 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000456 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
457}
458
Ajit Khaparde61000862013-10-03 16:16:33 -0500459static void populate_be_v2_stats(struct be_adapter *adapter)
460{
461 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
462 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
463 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
464 struct be_port_rxf_stats_v2 *port_stats =
465 &rxf_stats->port[adapter->port_num];
466 struct be_drv_stats *drvs = &adapter->drv_stats;
467
468 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
469 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
470 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
471 drvs->rx_pause_frames = port_stats->rx_pause_frames;
472 drvs->rx_crc_errors = port_stats->rx_crc_errors;
473 drvs->rx_control_frames = port_stats->rx_control_frames;
474 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
475 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
476 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
477 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
478 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
479 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
480 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
481 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
482 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
483 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
484 drvs->rx_dropped_header_too_small =
485 port_stats->rx_dropped_header_too_small;
486 drvs->rx_input_fifo_overflow_drop =
487 port_stats->rx_input_fifo_overflow_drop;
488 drvs->rx_address_filtered = port_stats->rx_address_filtered;
489 drvs->rx_alignment_symbol_errors =
490 port_stats->rx_alignment_symbol_errors;
491 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
492 drvs->tx_pauseframes = port_stats->tx_pauseframes;
493 drvs->tx_controlframes = port_stats->tx_controlframes;
494 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
495 drvs->jabber_events = port_stats->jabber_events;
496 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
497 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
498 drvs->forwarded_packets = rxf_stats->forwarded_packets;
499 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
500 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
501 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
502 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530503 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500504 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
505 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
506 drvs->rx_roce_frames = port_stats->roce_frames_received;
507 drvs->roce_drops_crc = port_stats->roce_drops_crc;
508 drvs->roce_drops_payload_len =
509 port_stats->roce_drops_payload_len;
510 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500511}
512
/* Copy the Lancer pport stats response into the driver's unified
 * be_drv_stats.  Lancer reports some 64-bit counters split into _lo/_hi
 * halves; only the low 32 bits are carried into the 32-bit drv_stats
 * fields here.  The response is converted from LE in place first.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address- and vlan-filtered drops separately */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	/* NOTE(review): rx_fifo_overflow feeds both fifo-overflow counters;
	 * presumably Lancer has a single fifo-overflow stat — confirm.
	 */
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000549
/* Fold a new reading @val of a 16-bit wrapping HW counter into the 32-bit
 * software accumulator *@acc.  The low 16 bits of *acc track the latest HW
 * reading; whenever the HW counter is seen to wrap (new value below the
 * previous one) the high half is advanced by 65536, extending the counter
 * to 32 bits.  The final store uses ACCESS_ONCE so concurrent lockless
 * readers see a single consistent write.
 * NOTE(review): the lowercase lo()/hi() macros are not #undef'd and so
 * leak to the rest of the file — verify nothing downstream collides.
 */
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);	/* HW counter went backwards => wrap */
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}
561
Jingoo Han4188e7d2013-08-05 18:02:02 +0900562static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530563 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000564{
565 if (!BEx_chip(adapter))
566 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
567 else
568 /* below erx HW counter can actually wrap around after
569 * 65535. Driver accumulates a 32-bit value
570 */
571 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
572 (u16)erx_stat);
573}
574
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000575void be_parse_stats(struct be_adapter *adapter)
576{
Ajit Khaparde61000862013-10-03 16:16:33 -0500577 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000578 struct be_rx_obj *rxo;
579 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000580 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000581
Sathya Perlaca34fe32012-11-06 17:48:56 +0000582 if (lancer_chip(adapter)) {
583 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000584 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000585 if (BE2_chip(adapter))
586 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500587 else if (BE3_chip(adapter))
588 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000589 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500590 else
591 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000592
Ajit Khaparde61000862013-10-03 16:16:33 -0500593 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000594 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000595 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
596 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000597 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000598 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000599}
600
/* ndo_get_stats64 handler: aggregates per-queue SW counters and the
 * f/w-reported drv_stats into @stats. Per-queue counters are read under
 * their u64_stats seqcount so 64-bit values are consistent on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	/* Sum RX packet/byte/mcast/drop counters across all RX queues */
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry loop: re-read if a writer updated the stats meanwhile */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	/* Sum TX packet/byte counters across all TX queues */
	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
668
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000669void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 struct net_device *netdev = adapter->netdev;
672
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000673 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000674 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000675 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000677
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530678 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000679 netif_carrier_on(netdev);
680 else
681 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200682
683 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684}
685
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500686static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687{
Sathya Perla3c8def92011-06-12 20:01:58 +0000688 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530689 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000690
Sathya Perlaab1594e2011-07-25 19:10:15 +0000691 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000692 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500693 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530694 stats->tx_pkts += tx_pkts;
695 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
696 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000697 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698}
699
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500700/* Returns number of WRBs needed for the skb */
701static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500703 /* +1 for the header wrb */
704 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705}
706
707static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
708{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500709 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
710 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
711 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
712 wrb->rsvd0 = 0;
713}
714
715/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
716 * to avoid the swap and shift/mask operations in wrb_fill().
717 */
718static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
719{
720 wrb->frag_pa_hi = 0;
721 wrb->frag_pa_lo = 0;
722 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000723 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700724}
725
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000726static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530727 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000728{
729 u8 vlan_prio;
730 u16 vlan_tag;
731
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100732 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000733 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
734 /* If vlan priority provided by OS is NOT in available bmap */
735 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
736 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500737 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000738
739 return vlan_tag;
740}
741
Sathya Perlac9c47142014-03-27 10:46:19 +0530742/* Used only for IP tunnel packets */
743static u16 skb_inner_ip_proto(struct sk_buff *skb)
744{
745 return (inner_ip_hdr(skb)->version == 4) ?
746 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
747}
748
749static u16 skb_ip_proto(struct sk_buff *skb)
750{
751 return (ip_hdr(skb)->version == 4) ?
752 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
753}
754
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530755static inline bool be_is_txq_full(struct be_tx_obj *txo)
756{
757 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
758}
759
760static inline bool be_can_txq_wake(struct be_tx_obj *txo)
761{
762 return atomic_read(&txo->q.used) < txo->q.len / 2;
763}
764
765static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
766{
767 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
768}
769
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530770static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
771 struct sk_buff *skb,
772 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530774 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700775
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000776 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, LSO, 1);
778 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000779 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530780 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530782 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530784 proto = skb_inner_ip_proto(skb);
785 } else {
786 proto = skb_ip_proto(skb);
787 }
788 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530789 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530790 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530791 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700792 }
793
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100794 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530795 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
796 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700797 }
798
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530799 BE_WRB_F_SET(wrb_params->features, CRC, 1);
800}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500801
/* Encode the previously collected wrb_params (and skb length/WRB count)
 * into the header WRB in the bit layout the HW expects.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum offload bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* large-send offload bits */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes a copy of the pkt to the BMC (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
838
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000839static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530840 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000841{
842 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500843 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000844
Sathya Perla7101e112010-03-22 20:41:12 +0000845
Sathya Perlaf986afc2015-02-06 08:18:43 -0500846 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
847 (u64)le32_to_cpu(wrb->frag_pa_lo);
848 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000849 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500850 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000851 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500852 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000853 }
854}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530856/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530857static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530859 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530861 queue_head_inc(&txo->q);
862 return head;
863}
864
865/* Set up the WRB header for xmit */
866static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
867 struct be_tx_obj *txo,
868 struct be_wrb_params *wrb_params,
869 struct sk_buff *skb, u16 head)
870{
871 u32 num_frags = skb_wrb_cnt(skb);
872 struct be_queue_info *txq = &txo->q;
873 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
874
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530875 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500876 be_dws_cpu_to_le(hdr, sizeof(*hdr));
877
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500878 BUG_ON(txo->sent_skb_list[head]);
879 txo->sent_skb_list[head] = skb;
880 txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530881 atomic_add(num_frags, &txq->used);
882 txo->last_req_wrb_cnt = num_frags;
883 txo->pend_wrb_cnt += num_frags;
884}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700885
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530886/* Setup a WRB fragment (buffer descriptor) for xmit */
887static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
888 int len)
889{
890 struct be_eth_wrb *wrb;
891 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700892
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530893 wrb = queue_head_node(txq);
894 wrb_fill(wrb, busaddr, len);
895 queue_head_inc(txq);
896}
897
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 *
 * @head:       producer index of the failed packet's header WRB
 * @map_single: true if the first mapped WRB was the linear head
 *              (dma_map_single); subsequent ones are always page frags
 * @copied:     total bytes successfully mapped so far, used as the loop bound
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to walk the packet's WRBs */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first WRB may be a single mapping */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index at the packet's start for reuse */
	txq->head = head;
}
925
926/* Enqueue the given packet for transmit. This routine allocates WRBs for the
927 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
928 * of WRBs used up by the packet.
929 */
930static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
931 struct sk_buff *skb,
932 struct be_wrb_params *wrb_params)
933{
934 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
935 struct device *dev = &adapter->pdev->dev;
936 struct be_queue_info *txq = &txo->q;
937 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530938 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530939 dma_addr_t busaddr;
940 int len;
941
942 head = be_tx_get_wrb_hdr(txo);
943
944 if (skb->len > skb->data_len) {
945 len = skb_headlen(skb);
946
947 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
948 if (dma_mapping_error(dev, busaddr))
949 goto dma_err;
950 map_single = true;
951 be_tx_setup_wrb_frag(txo, busaddr, len);
952 copied += len;
953 }
954
955 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
956 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
957 len = skb_frag_size(frag);
958
959 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
960 if (dma_mapping_error(dev, busaddr))
961 goto dma_err;
962 be_tx_setup_wrb_frag(txo, busaddr, len);
963 copied += len;
964 }
965
966 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
967
968 be_tx_stats_update(txo, skb);
969 return wrb_cnt;
970
971dma_err:
972 adapter->drv_stats.dma_map_errors++;
973 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000974 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700975}
976
/* Non-zero once the f/w has delivered the QnQ async event; returns the
 * raw flag bit (callers use it only in boolean context).
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
981
Somnath Kotur93040ae2012-06-26 22:32:10 +0000982static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000983 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530984 struct be_wrb_params
985 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000986{
987 u16 vlan_tag = 0;
988
989 skb = skb_share_check(skb, GFP_ATOMIC);
990 if (unlikely(!skb))
991 return skb;
992
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100993 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000994 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530995
996 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
997 if (!vlan_tag)
998 vlan_tag = adapter->pvid;
999 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1000 * skip VLAN insertion
1001 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301002 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301003 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001004
1005 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001006 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1007 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001008 if (unlikely(!skb))
1009 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001010 skb->vlan_tci = 0;
1011 }
1012
1013 /* Insert the outer VLAN, if any */
1014 if (adapter->qnq_vid) {
1015 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001016 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1017 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001018 if (unlikely(!skb))
1019 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301020 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001021 }
1022
Somnath Kotur93040ae2012-06-26 22:32:10 +00001023 return skb;
1024}
1025
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001026static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1027{
1028 struct ethhdr *eh = (struct ethhdr *)skb->data;
1029 u16 offset = ETH_HLEN;
1030
1031 if (eh->h_proto == htons(ETH_P_IPV6)) {
1032 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1033
1034 offset += sizeof(struct ipv6hdr);
1035 if (ip6h->nexthdr != NEXTHDR_TCP &&
1036 ip6h->nexthdr != NEXTHDR_UDP) {
1037 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301038 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001039
1040 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1041 if (ehdr->hdrlen == 0xff)
1042 return true;
1043 }
1044 }
1045 return false;
1046}
1047
1048static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1049{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001050 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051}
1052
Sathya Perla748b5392014-05-09 13:29:13 +05301053static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001054{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001055 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001056}
1057
/* Apply the BEx/Lancer HW-bug workarounds that require inspecting or
 * rewriting the packet. Returns the (possibly reallocated) skb, or NULL
 * if the pkt had to be dropped or vlan insertion failed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the pad so the frame length matches tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* dropped pkt is freed here; caller only sees NULL */
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1126
/* Top-level TX workaround dispatcher: pads runt packets, applies the
 * chip-specific (BEx/Lancer) workarounds, and bounds the skb length to
 * what the HW can handle. Returns NULL if the skb was consumed/dropped.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		/* skb_put_padto() frees the skb on failure */
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 * pskb_trim() is a no-op when skb->len is already within bounds.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1158
/* Notify the HW of all pending (not yet door-belled) WRBs on this queue.
 * Patches the last request's header WRB in place: makes it eventable and,
 * on non-Lancer chips, appends a dummy WRB when the pending count is odd.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* rewrite the num_wrb field (hdr is already LE; dw[2] is
		 * patched with LE-converted masks/values)
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	/* ring the doorbell for everything queued since the last flush */
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1182
/* OS2BMC related */

/* Well-known UDP ports whose traffic the BMC may be configured to snoop */
#define DHCP_CLIENT_PORT 68
#define DHCP_SERVER_PORT 67
#define NET_BIOS_PORT1 137
#define NET_BIOS_PORT2 138
#define DHCPV6_RAS_PORT 547

/* Predicates over the adapter's BMC filter mask: a pkt class is forwarded
 * to the BMC when the corresponding filter bit is NOT set (mc/bc), or IS
 * set (protocol-specific filters below).
 */
#define is_mc_allowed_on_bmc(adapter, eh) \
	(!is_multicast_filt_enabled(adapter) && \
	 is_multicast_ether_addr(eh->h_dest) && \
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh) \
	(!is_broadcast_filt_enabled(adapter) && \
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb) \
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* NOTE(review): compare_ether_addr() is deprecated in favour of
 * ether_addr_equal(); this macro appears unused in this file.
 */
#define is_broadcast_packet(eh, adapter) \
		(is_multicast_ether_addr(eh->h_dest) && \
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & \
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter) \
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1236
/* Classify the outgoing pkt against the BMC filter configuration and
 * decide whether a copy must also be sent to the BMC (OS2BMC).
 * When true is returned, *skb may have been replaced: the VLAN tag (if
 * any) is software-inserted, since the ASIC expects it inline for
 * BMC-destined pkts.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* Only multicast/broadcast frames are candidates for the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 NDISC router/neighbour advertisements */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6 relay-agent-and-server over UDP */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 * NOTE(review): be_insert_vlan_in_pkt() can return NULL (e.g. if
	 * skb_share_check() fails) and is called here with wrb_params
	 * == NULL -- verify the caller handles both conditions.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1306
/* ndo_start_xmit handler: applies HW workarounds, builds WRB params,
 * enqueues the pkt (twice when a BMC copy is needed), manages subqueue
 * flow control and rings the doorbell when flushing is due.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* defer the doorbell while the stack indicates more pkts follow */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;	/* workaround path already freed the skb */

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; queue state was rolled back */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* extra ref: the skb now sits in two WRB chains */
			skb_get(skb);
	}

	/* stop the subqueue before the ring can overflow on the next pkt */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1357
1358static int be_change_mtu(struct net_device *netdev, int new_mtu)
1359{
1360 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301361 struct device *dev = &adapter->pdev->dev;
1362
1363 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1364 dev_info(dev, "MTU must be between %d and %d bytes\n",
1365 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 return -EINVAL;
1367 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301368
1369 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301370 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 netdev->mtu = new_mtu;
1372 return 0;
1373}
1374
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001375static inline bool be_in_all_promisc(struct be_adapter *adapter)
1376{
1377 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1378 BE_IF_FLAGS_ALL_PROMISCUOUS;
1379}
1380
1381static int be_set_vlan_promisc(struct be_adapter *adapter)
1382{
1383 struct device *dev = &adapter->pdev->dev;
1384 int status;
1385
1386 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1387 return 0;
1388
1389 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1390 if (!status) {
1391 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1392 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1393 } else {
1394 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1395 }
1396 return status;
1397}
1398
1399static int be_clear_vlan_promisc(struct be_adapter *adapter)
1400{
1401 struct device *dev = &adapter->pdev->dev;
1402 int status;
1403
1404 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1405 if (!status) {
1406 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1407 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1408 }
1409 return status;
1410}
1411
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filter programming succeeded; promisc no longer needed */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1447
/* ndo_vlan_rx_add_vid handler: track the new VID in the driver's bitmap
 * and reprogram the HW VLAN table. On failure the SW state is rolled back.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* Already programmed; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* Undo the bookkeeping so SW state matches HW state */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1471
Patrick McHardy80d5c362013-04-19 02:04:28 +00001472static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001473{
1474 struct be_adapter *adapter = netdev_priv(netdev);
1475
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001476 /* Packets with VID 0 are always received by Lancer by default */
1477 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301478 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001479
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301480 if (!test_bit(vid, adapter->vids))
1481 return 0;
1482
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301483 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301484 adapter->vlans_added--;
1485
1486 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001487}
1488
/* Turn off full promiscuous RX filtering.
 * NOTE(review): the SW flag is cleared even if the firmware command
 * fails — presumably intentional best-effort; confirm against firmware
 * error semantics before changing.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1494
/* Turn on full promiscuous RX filtering and record it in if_flags.
 * The flag is set unconditionally, mirroring be_clear_all_promisc().
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1500
1501static void be_set_mc_promisc(struct be_adapter *adapter)
1502{
1503 int status;
1504
1505 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1506 return;
1507
1508 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1509 if (!status)
1510 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1511}
1512
1513static void be_set_mc_list(struct be_adapter *adapter)
1514{
1515 int status;
1516
1517 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1518 if (!status)
1519 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1520 else
1521 be_set_mc_promisc(adapter);
1522}
1523
/* Re-program the unicast MAC filter list from the netdev's UC list.
 * Existing entries (slots 1..uc_macs) are deleted first, then the
 * current list is re-added; falls back to full promiscuous mode when
 * the list exceeds the HW limit.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete all previously programmed UC MACs; uc_macs drops to 0 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* uc_macs is pre-incremented so pmac_id[0] stays the Primary MAC */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1544
1545static void be_clear_uc_list(struct be_adapter *adapter)
1546{
1547 int i;
1548
1549 for (i = 1; i < (adapter->uc_macs + 1); i++)
1550 be_cmd_pmac_del(adapter, adapter->if_handle,
1551 adapter->pmac_id[i], 0);
1552 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301553}
1554
/* ndo_set_rx_mode handler: reconcile the HW RX filters (promisc,
 * multicast, unicast) with the netdev's current flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Re-program VLAN filters that promisc mode had bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Only reprogram UC MACs when the list actually changed size */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1583
/* ndo_set_vf_mac handler: program @mac as the VF's MAC address.
 * On BEx chips the old pmac entry is deleted and a new one added;
 * newer chips use a single set_mac command.
 * Returns 0, -EPERM, -EINVAL, or a translated command status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace the pmac entry (delete old, add new) */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the active MAC so repeat requests can be short-circuited */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1623
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001624static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301625 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001626{
1627 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001628 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001629
Sathya Perla11ac75e2011-12-13 00:58:50 +00001630 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001631 return -EPERM;
1632
Sathya Perla11ac75e2011-12-13 00:58:50 +00001633 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001634 return -EINVAL;
1635
1636 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001637 vi->max_tx_rate = vf_cfg->tx_rate;
1638 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001639 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1640 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001641 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301642 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001643 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001644
1645 return 0;
1646}
1647
/* Enable Transparent VLAN Tagging (TVT) with @vlan on a VF: program the
 * tag in the host switch, wipe any VLAN filters the VF had programmed,
 * and revoke the VF's filter-management privilege so it cannot add more.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1676
/* Disable Transparent VLAN Tagging on a VF and hand VLAN filter
 * management back to it. The VM must bounce its interface for the
 * change to fully take effect.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1703
/* ndo_set_vf_vlan handler: a non-zero vlan/qos enables Transparent VLAN
 * Tagging with that tag; vlan==0 && qos==0 disables it.
 * Returns 0, -EPERM, -EINVAL, or a translated command status.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Fold the priority bits into the tag before programming */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
1733
/* ndo_set_vf_rate handler: set a VF's maximum TX rate in Mbps.
 * min_tx_rate is not supported. max_tx_rate == 0 removes the limit.
 * The rate must lie between 100 Mbps and the current link speed, and on
 * Skyhawk must be a multiple of 1% of the link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Minimum rate limiting is not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 means "no limit"; skip the link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301795
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301796static int be_set_vf_link_state(struct net_device *netdev, int vf,
1797 int link_state)
1798{
1799 struct be_adapter *adapter = netdev_priv(netdev);
1800 int status;
1801
1802 if (!sriov_enabled(adapter))
1803 return -EPERM;
1804
1805 if (vf >= adapter->num_vfs)
1806 return -EINVAL;
1807
1808 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301809 if (status) {
1810 dev_err(&adapter->pdev->dev,
1811 "Link state change on VF %d failed: %#x\n", vf, status);
1812 return be_cmd_status(status);
1813 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301814
Kalesh APabccf232014-07-17 16:20:24 +05301815 adapter->vf_cfg[vf].plink_tracking = link_state;
1816
1817 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301818}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001819
Kalesh APe7bcbd72015-05-06 05:30:32 -04001820static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1821{
1822 struct be_adapter *adapter = netdev_priv(netdev);
1823 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1824 u8 spoofchk;
1825 int status;
1826
1827 if (!sriov_enabled(adapter))
1828 return -EPERM;
1829
1830 if (vf >= adapter->num_vfs)
1831 return -EINVAL;
1832
1833 if (BEx_chip(adapter))
1834 return -EOPNOTSUPP;
1835
1836 if (enable == vf_cfg->spoofchk)
1837 return 0;
1838
1839 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1840
1841 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1842 0, spoofchk);
1843 if (status) {
1844 dev_err(&adapter->pdev->dev,
1845 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1846 return be_cmd_status(status);
1847 }
1848
1849 vf_cfg->spoofchk = enable;
1850 return 0;
1851}
1852
Sathya Perla2632baf2013-10-01 16:00:00 +05301853static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1854 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855{
Sathya Perla2632baf2013-10-01 16:00:00 +05301856 aic->rx_pkts_prev = rx_pkts;
1857 aic->tx_reqs_prev = tx_pkts;
1858 aic->jiffies = now;
1859}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001860
/* Compute a new event-queue delay (interrupt coalescing value) for @eqo
 * from the packet rate observed since the last AIC baseline.
 * Returns the static eq-delay when AIC is disabled.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		/* ethtool-configured static delay */
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum RX pkt counts over all RX queues on this EQ; the
	 * u64_stats begin/retry loop gives a consistent 64-bit read.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* pkts per second across RX and TX since the last baseline */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Very low rates get no coalescing; clamp to the AIC bounds */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1921
1922/* For Skyhawk-R only */
1923static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1924{
1925 struct be_adapter *adapter = eqo->adapter;
1926 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1927 ulong now = jiffies;
1928 int eqd;
1929 u32 mult_enc;
1930
1931 if (!aic->enable)
1932 return 0;
1933
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05301934 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001935 eqd = aic->prev_eqd;
1936 else
1937 eqd = be_get_new_eqd(eqo);
1938
1939 if (eqd > 100)
1940 mult_enc = R2I_DLY_ENC_1;
1941 else if (eqd > 60)
1942 mult_enc = R2I_DLY_ENC_2;
1943 else if (eqd > 20)
1944 mult_enc = R2I_DLY_ENC_3;
1945 else
1946 mult_enc = R2I_DLY_ENC_0;
1947
1948 aic->prev_eqd = eqd;
1949
1950 return mult_enc;
1951}
1952
/* Recompute the eq-delay for every event queue and push all changed
 * values to the firmware in one modify-eqd command.
 * @force_update: program every EQ even if its delay did not change.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* FW expects the delay as a multiplier, ~65% of eqd */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all updates into a single FW command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1974
/* Account one RX completion in the per-queue stats, inside the
 * u64_stats sync section so 64-bit counters read consistently on 32-bit.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1992
Sathya Perla2e588f82011-03-11 02:49:26 +00001993static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001994{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001995 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301996 * Also ignore ipcksm for ipv6 pkts
1997 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001998 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301999 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002000}
2001
/* Pop the page-info entry at the RX queue tail and make its data
 * CPU-visible: the last fragment of a big page is fully DMA-unmapped,
 * earlier fragments are only synced (the mapping is still shared).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Whole compound page consumed; release the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still partially posted; just sync this fragment */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2027
2028/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002029static void be_rx_compl_discard(struct be_rx_obj *rxo,
2030 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002031{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002032 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002033 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002035 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302036 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002037 put_page(page_info->page);
2038 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002039 }
2040}
2041
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment (or whole tiny packet) is
 * copied into the skb's linear area, remaining fragments are attached
 * as page frags, coalescing consecutive frags that share a physical
 * page into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area; the
		 * rest of the first frag stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page moved to the skb (or was dropped) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag[j]; drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2116
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted rx pages, set checksum/
 * rx-queue/hash/vlan metadata and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No memory for an skb: drop the frame and recycle frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only if the netdev allows it */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled frames the HW csum covers the inner packet */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2152
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted rx pages as frags of a napi-provided skb (coalescing
 * frags that share a physical page) and feed it to the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: the -1 init wraps to 0xffff so the first iteration's
	 * j++ lands on frag slot 0
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frag[j]; drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled frames the HW csum covers the inner packet */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2209
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002210static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2211 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002212{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302213 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2214 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2215 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2216 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2217 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2218 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2219 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2220 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2221 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2222 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2223 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002224 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302225 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2226 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002227 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302228 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302229 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302230 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002231}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002232
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002233static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2234 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002235{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302236 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2237 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2238 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2239 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2240 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2241 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2242 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2243 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2244 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2245 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2246 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002247 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302248 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2249 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002250 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302251 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2252 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002253}
2254
/* Fetch the next valid Rx completion from rxo's CQ, or NULL if none.
 * The entry is byte-swapped to CPU order, parsed into rxo->rxcp (v1
 * layout for BE3-native, v0 otherwise), vlan info is sanitized, and
 * the CQ entry is invalidated so it is never processed twice.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* Ignore the HW l4-csum result for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the vlan when it matches the pvid and is not in the
		 * driver's configured vlan list
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2299
Eric Dumazet1829b082011-03-01 05:48:12 +00002300static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002301{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002303
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002304 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002305 gfp |= __GFP_COMP;
2306 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307}
2308
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. At most @frags_needed descriptors are posted;
 * posting stops early on allocation/mapping failure or a full ring.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* A non-NULL page in the head slot means the ring is full */
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Grab and DMA-map a fresh "big" page; it is carved
			 * into rx_frag_size chunks by the iterations below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next fragment out of the current page;
			 * each fragment holds its own page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Publish the fragment's DMA address in the rx descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* Last frag of the page records the page's DMA
			 * address for the eventual dma_unmap_page()
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in bounded-size chunks */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2391
/* Fetch the next valid Tx completion from txo's CQ, or NULL if none.
 * The entry is byte-swapped to CPU order, its status and last-wrb index
 * are cached in txo->txcp, and the CQ entry is then invalidated.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Invalidate the entry so it is never processed twice */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2412
/* Walk the txq from its tail up to and including @last_index, unmapping
 * each wrb's DMA buffer and freeing the skbs that were sent. A non-NULL
 * entry in sent_skb_list marks the hdr wrb that starts a new request.
 * Returns the number of wrbs consumed (the caller adjusts txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data wrb after the hdr wrb maps the skb's linear
		 * header area (when non-empty); later wrbs map page frags.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2447
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002448/* Return the number of events in the event queue */
2449static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002450{
2451 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002452 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002453
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002454 do {
2455 eqe = queue_tail_node(&eqo->q);
2456 if (eqe->evt == 0)
2457 break;
2458
2459 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002460 eqe->evt = 0;
2461 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002462 queue_tail_inc(&eqo->q);
2463 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002464
2465 return num;
2466}
2467
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002468/* Leaves the EQ is disarmed state */
2469static void be_eq_clean(struct be_eq_obj *eqo)
2470{
2471 int num = events_get(eqo);
2472
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002473 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002474}
2475
Kalesh AP99b44302015-08-05 03:27:49 -04002476/* Free posted rx buffers that were not used */
2477static void be_rxq_clean(struct be_rx_obj *rxo)
2478{
2479 struct be_queue_info *rxq = &rxo->q;
2480 struct be_rx_page_info *page_info;
2481
2482 while (atomic_read(&rxq->used) > 0) {
2483 page_info = get_rx_page_info(rxo);
2484 put_page(page_info->page);
2485 memset(page_info, 0, sizeof(*page_info));
2486 }
2487 BUG_ON(atomic_read(&rxq->used));
2488 rxq->tail = 0;
2489 rxq->head = 0;
2490}
2491
/* Drain rxo's completion queue, discarding all frames, until the HW's
 * flush completion (num_rcvd == 0) arrives; bounded by a ~50ms wait.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50 x 1ms, or if the HW is in an
			 * error state and will never send the flush compl
			 */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2531
/* Drain Tx completions from all Tx queues, then reclaim any requests
 * that were enqueued but never completed (including those never even
 * notified to HW), leaving the Tx queues empty and consistent.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2598
/* Tear down all event queues: drain and destroy each created EQ in HW,
 * remove its napi context and affinity mask, then free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* Queue memory is freed even if creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2615
/* Create the adapter's event queues (one per interrupt, capped by the
 * configured queue count): allocate and create each EQ in HW, set up its
 * interrupt-coalescing (AIC) defaults, a NUMA-local affinity mask and a
 * napi context. Returns 0 or a negative errno on the first failure
 * (partial setup is left for be_evt_queues_destroy() to unwind).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQ affinity over CPUs local to the device's node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002653}
2654
Sathya Perla5fb379e2009-06-18 00:02:59 +00002655static void be_mcc_queues_destroy(struct be_adapter *adapter)
2656{
2657 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002658
Sathya Perla8788fdc2009-07-27 22:52:03 +00002659 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002660 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002661 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002662 be_queue_free(adapter, q);
2663
Sathya Perla8788fdc2009-07-27 22:52:03 +00002664 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002665 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002666 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002667 be_queue_free(adapter, q);
2668}
2669
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC completion queue and the MCC WRB queue, in that
 * order. On any failure the partially-created resources are unwound
 * in reverse via the goto chain and -1 is returned; 0 on success.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwinding: undo successful steps in reverse order */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2702
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002703static void be_tx_queues_destroy(struct be_adapter *adapter)
2704{
2705 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002706 struct be_tx_obj *txo;
2707 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002708
Sathya Perla3c8def92011-06-12 20:01:58 +00002709 for_all_tx_queues(adapter, txo, i) {
2710 q = &txo->q;
2711 if (q->created)
2712 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2713 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002714
Sathya Perla3c8def92011-06-12 20:01:58 +00002715 q = &txo->cq;
2716 if (q->created)
2717 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2718 be_queue_free(adapter, q);
2719 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002720}
2721
/* Allocates and creates the TX queues: one TXQ plus completion queue
 * per queue, capped by the number of EQs and be_max_txqs(). Each TX-CQ
 * is bound round-robin to an EQ and the XPS map is programmed with that
 * EQ's CPU affinity. Returns 0, or the first failing status (caller
 * cleans up via be_tx_queues_destroy()).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from CPUs in this EQ's affinity mask
		 * to this queue (XPS)
		 */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2766
2767static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002768{
2769 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002770 struct be_rx_obj *rxo;
2771 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002772
Sathya Perla3abcded2010-10-03 22:12:27 -07002773 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002774 q = &rxo->cq;
2775 if (q->created)
2776 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2777 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002778 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002779}
2780
/* Allocates and creates the RX completion queues. As many RSS rings as
 * there are EQs are attempted; RSS is disabled entirely when fewer than
 * two rings are possible. A default (non-RSS) RXQ is added when needed
 * and at least one RXQ always exists. Each RX-CQ is bound round-robin
 * to an EQ. Returns 0 or the first failing status (caller unwinds via
 * be_rx_cqs_destroy()).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Distribute the RX-CQs across the EQs round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2822
/* Legacy INTx interrupt handler (used when MSI-x is unavailable).
 * Schedules NAPI on EQ0 — the only EQ serviced in INTx mode — and
 * acks the counted events without re-arming the EQ.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2854
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002855static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002856{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002857 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002858
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002859 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00002860 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002861 return IRQ_HANDLED;
2862}
2863
Sathya Perla2e588f82011-03-11 02:49:26 +00002864static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002865{
Somnath Koture38b1702013-05-29 22:55:56 +00002866 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002867}
2868
/* Services up to @budget RX completions from @rxo's completion queue.
 * Flush completions (no data), zero-length partial-DMA completions
 * (Lancer B0) and packets arriving on a foreign port (imperfect BE
 * filtering in promiscuous mode) are discarded. GRO is used for
 * eligible packets unless called from the busy-poll path. Processed
 * entries are acked on the CQ and the RXQ is replenished unless it is
 * in post_starved state (be_worker refills it then).
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Track consumed frags so the refill below can keep pace */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2928
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302929static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302930{
2931 switch (status) {
2932 case BE_TX_COMP_HDR_PARSE_ERR:
2933 tx_stats(txo)->tx_hdr_parse_err++;
2934 break;
2935 case BE_TX_COMP_NDMA_ERR:
2936 tx_stats(txo)->tx_dma_err++;
2937 break;
2938 case BE_TX_COMP_ACL_ERR:
2939 tx_stats(txo)->tx_spoof_check_err++;
2940 break;
2941 }
2942}
2943
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302944static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302945{
2946 switch (status) {
2947 case LANCER_TX_COMP_LSO_ERR:
2948 tx_stats(txo)->tx_tso_err++;
2949 break;
2950 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2951 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2952 tx_stats(txo)->tx_spoof_check_err++;
2953 break;
2954 case LANCER_TX_COMP_QINQ_ERR:
2955 tx_stats(txo)->tx_qinq_err++;
2956 break;
2957 case LANCER_TX_COMP_PARITY_ERR:
2958 tx_stats(txo)->tx_internal_parity_err++;
2959 break;
2960 case LANCER_TX_COMP_DMA_ERR:
2961 tx_stats(txo)->tx_dma_err++;
2962 break;
2963 }
2964}
2965
/* Reaps all available TX completions for @txo (the txq with netdev
 * subqueue index @idx): frees the completed wrbs, records error stats
 * for failed completions, acks the CQ, and wakes the subqueue if it
 * had been stopped for lack of wrb space.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Error decoding differs per ASIC family */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003000
/* Per-EQ lock helpers arbitrating between the NAPI poll path and the
 * busy-poll path. Only one of them may own an EQ at a time; the loser
 * records a *_YIELD flag in eqo->state so the owner knows work was
 * skipped. When CONFIG_NET_RX_BUSY_POLL is off, the stubs make NAPI
 * always win and busy-poll never run.
 */
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Try to claim the EQ for NAPI; returns false if busy-poll holds it */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership of the EQ */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-poll; returns false if NAPI holds it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership of the EQ */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock and state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until NAPI ownership is acquired, shutting out busy-poll */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queueus.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3100
/* NAPI poll handler — one poll context per EQ. Processes TX completions
 * for all TXQs on this EQ, then RX completions (unless busy-poll
 * currently owns the EQ, in which case the full budget is reported so
 * NAPI stays scheduled), and MCC completions on the MCC EQ. When work
 * is below budget, NAPI is completed and the EQ re-armed (with an
 * interrupt-delay multiplier on Skyhawk); otherwise events are only
 * acked and polling continues.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ: consume the whole budget so NAPI
		 * stays scheduled and retries
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3149
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) receive path for this EQ. Returns
 * LL_FLUSH_BUSY when NAPI owns the EQ; otherwise polls each RXQ on the
 * EQ (budget of 4) and returns as soon as any completions are found.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3171
/* Polls the adapter's error registers and latches any detected error.
 * No-op once an HW error is already flagged. On Lancer, the SLIPORT
 * status/error registers are read; FW-reset signatures are logged as an
 * in-progress firmware update rather than an error. On BE/Skyhawk the
 * UE status registers are read and masked; unmasked unrecoverable
 * errors are logged bit-by-bit, but the UE error state is latched only
 * on Skyhawk since BE hardware can report spurious UEs.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Masked bits are expected; keep only real UE indications */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3240
Sathya Perla8d56ff12009-11-22 22:02:26 +00003241static void be_msix_disable(struct be_adapter *adapter)
3242{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003243 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003244 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003245 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303246 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003247 }
3248}
3249
/* Enables MSI-x. When RoCE is supported, enough vectors are requested
 * to cover both the max NIC vectors configurable via set-channels and
 * the RoCE vectors; otherwise just the configured queue count. The
 * granted vectors are then split between RoCE (half, when granted more
 * than the minimum) and NIC. Returns 0, except on VFs where an MSI-x
 * failure is fatal (INTx unsupported) and the pci error is returned.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3293
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003294static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303295 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003296{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303297 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003298}
3299
/* Requests an IRQ for every EQ's MSI-x vector and hints the affinity
 * to the EQ's CPU mask. On failure, frees the already-requested
 * vectors in reverse order and disables MSI-x so the caller can fall
 * back to INTx; returns 0 or the request_irq() error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Unwind only the vectors requested so far (i is the failed one) */
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3327
3328static int be_irq_register(struct be_adapter *adapter)
3329{
3330 struct net_device *netdev = adapter->netdev;
3331 int status;
3332
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003333 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003334 status = be_msix_register(adapter);
3335 if (status == 0)
3336 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003337 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003338 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003339 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003340 }
3341
Sathya Perlae49cc342012-11-27 19:50:02 +00003342 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003343 netdev->irq = adapter->pdev->irq;
3344 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003345 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003346 if (status) {
3347 dev_err(&adapter->pdev->dev,
3348 "INTx request IRQ failed - err %d\n", status);
3349 return status;
3350 }
3351done:
3352 adapter->isr_registered = true;
3353 return 0;
3354}
3355
3356static void be_irq_unregister(struct be_adapter *adapter)
3357{
3358 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003359 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003360 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003361
3362 if (!adapter->isr_registered)
3363 return;
3364
3365 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003366 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003367 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003368 goto done;
3369 }
3370
3371 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003372 for_all_evt_queues(adapter, eqo, i) {
3373 vec = be_msix_vec_get(adapter, eqo);
3374 irq_set_affinity_hint(vec, NULL);
3375 free_irq(vec, eqo);
3376 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003377
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003378done:
3379 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003380}
3381
/* Destroy all RX queues and their backing memory, then turn RSS off in FW.
 * Called from the close/teardown path; expects RX traffic to be quiesced.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			/* Destroy the queue in FW, then drain any completions
			 * and free the buffers still posted to the ring.
			 */
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	/* Disable RSS in FW if it was enabled; the RXQs it pointed at are
	 * gone.  128 here is the indirection-table length passed to FW --
	 * NOTE(review): differs from RSS_INDIR_TABLE_LEN used in
	 * be_rx_qs_create(); presumably equal -- confirm.
	 */
	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3418
/* Remove the interface's RX filters: primary MAC, UC list and --
 * on Lancer only -- the IFACE RX-filter flags.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Delete the primary MAC filter programmed in be_enable_if_filters() */
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW.  As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is * used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3445
/* ndo_stop callback: quiesce and tear down the datapath in a strict
 * order -- filters, NAPI, MCC, TX drain, RX queues, IRQ sync, IRQs.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_disable_if_filters(adapter);

	/* Stop NAPI/busy-poll processing on every event queue */
	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Make sure no ISR for this adapter is still running, then flush
	 * any residual EQ entries, before the IRQs are released below.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3490
/* Allocate and create all RX queues in FW, program the RSS indirection
 * table and hash key (when multiple RX queues exist), and post the
 * initial receive buffers.
 *
 * Returns 0 on success or a FW/allocation error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	/* Allocate ring memory for every RX queue first */
	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the non-RSS default RXQ when needed (or when there are
	 * no RSS queues at all).
	 */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	/* Create the RSS-capable RXQs */
	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * round-robin across all RSS_INDIR_TABLE_LEN slots.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS hashing is not available on BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		/* Remember the key actually programmed into FW */
		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3561
Kalesh APbcc84142015-08-05 03:27:48 -04003562static int be_enable_if_filters(struct be_adapter *adapter)
3563{
3564 int status;
3565
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003566 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003567 if (status)
3568 return status;
3569
3570 /* For BE3 VFs, the PF programs the initial MAC address */
3571 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3572 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3573 adapter->if_handle,
3574 &adapter->pmac_id[0], 0);
3575 if (status)
3576 return status;
3577 }
3578
3579 if (adapter->vlans_added)
3580 be_vid_config(adapter);
3581
3582 be_set_rx_mode(adapter->netdev);
3583
3584 return 0;
3585}
3586
/* ndo_open callback: bring up the datapath -- RX queues, filters, IRQs,
 * CQ/EQ arming, NAPI -- then start the TX queues.
 *
 * Returns 0 on success; on any failure the partial setup is torn down
 * via be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Re-arm all RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	/* Enable NAPI/busy-poll and arm every event queue */
	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state; a failure here is non-fatal */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN offload is a Skyhawk-only feature */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3638
/* Enable or disable magic-packet Wake-on-LAN in FW and PCI PM state.
 *
 * When enabling, the PM control register is written first and an
 * all-zero MAC is passed to the FW command; when disabling, the
 * netdev's current MAC is passed instead -- presumably the zero/non-zero
 * MAC is how FW distinguishes arm vs disarm (NOTE(review): confirm
 * against be_cmd_enable_magic_wol()).
 *
 * Returns 0 on success or an error code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem cmd;
	u8 mac[ETH_ALEN];
	int status;

	eth_zero_addr(mac);

	/* DMA buffer for the FW magic-WoL command payload */
	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(dev, "Could not enable Wake-on-lan\n");
			goto err;
		}
	} else {
		ether_addr_copy(mac, adapter->netdev->dev_addr);
	}

	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
	/* Propagate the wake capability to the PCI core for D3hot/D3cold */
	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
err:
	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3672
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003673static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3674{
3675 u32 addr;
3676
3677 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3678
3679 mac[5] = (u8)(addr & 0xFF);
3680 mac[4] = (u8)((addr >> 8) & 0xFF);
3681 mac[3] = (u8)((addr >> 16) & 0xFF);
3682 /* Use the OUI from the current MAC address */
3683 memcpy(mac, adapter->netdev->dev_addr, 3);
3684}
3685
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003686/*
3687 * Generate a seed MAC address from the PF MAC Address using jhash.
3688 * MAC Address for VFs are assigned incrementally starting from the seed.
3689 * These addresses are programmed in the ASIC by the PF and the VF driver
3690 * queries for the MAC address during its probe.
3691 */
Sathya Perla4c876612013-02-03 20:30:11 +00003692static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003693{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003694 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003695 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003696 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003697 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003698
3699 be_vf_eth_addr_generate(adapter, mac);
3700
Sathya Perla11ac75e2011-12-13 00:58:50 +00003701 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303702 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003703 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003704 vf_cfg->if_handle,
3705 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303706 else
3707 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3708 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003709
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003710 if (status)
3711 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303712 "Mac address assignment failed for VF %d\n",
3713 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003714 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003715 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003716
3717 mac[5] += 1;
3718 }
3719 return status;
3720}
3721
Sathya Perla4c876612013-02-03 20:30:11 +00003722static int be_vfs_mac_query(struct be_adapter *adapter)
3723{
3724 int status, vf;
3725 u8 mac[ETH_ALEN];
3726 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003727
3728 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303729 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3730 mac, vf_cfg->if_handle,
3731 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003732 if (status)
3733 return status;
3734 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3735 }
3736 return 0;
3737}
3738
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003739static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003740{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003741 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003742 u32 vf;
3743
Sathya Perla257a3fe2013-06-14 15:54:51 +05303744 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003745 dev_warn(&adapter->pdev->dev,
3746 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003747 goto done;
3748 }
3749
Sathya Perlab4c1df92013-05-08 02:05:47 +00003750 pci_disable_sriov(adapter->pdev);
3751
Sathya Perla11ac75e2011-12-13 00:58:50 +00003752 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303753 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003754 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3755 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303756 else
3757 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3758 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003759
Sathya Perla11ac75e2011-12-13 00:58:50 +00003760 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3761 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003762done:
3763 kfree(adapter->vf_cfg);
3764 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303765 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003766}
3767
/* Destroy all adapter queues.  Order matters: MCC first, then RX CQs,
 * TX queues, and finally the event queues they all feed into.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3775
/* Cancel the periodic worker, waiting for a running instance to finish,
 * and clear the scheduled flag so it is not cancelled twice.
 */
static void be_cancel_worker(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->work);
		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
	}
}
3783
/* Cancel the delayed error-detection work, waiting for a running
 * instance to finish, and clear its scheduled flag.
 */
static void be_cancel_err_detection(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
		cancel_delayed_work_sync(&adapter->be_err_detection_work);
		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
	}
}
3791
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN tunnel offload: convert the tunnel IFACE back to normal,
 * clear the VxLAN port in FW, and strip the encapsulation feature bits
 * from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Advertise that UDP-tunnel segmentation is no longer offloaded */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303812
/* Compute the per-VF resource template (vft_res) for num_vfs VFs by
 * splitting the PF's resource pool (adapter->pool_res) between the PF
 * and its VFs.  Only fields the FW reports as modifiable are divided;
 * the rest are left at their defaults in the zero-initialized vft_res.
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and it's VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	/* VFs are never allowed VLAN-promiscuous mode when the flag is
	 * FW-modifiable.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and it's VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
3889
/* Full adapter teardown (inverse of setup): stop the worker, clear VFs,
 * rebalance SR-IOV resources in FW, drop offloads and filters, destroy
 * the IFACE and all queues, and disable MSI-X.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3927
Sathya Perla4c876612013-02-03 20:30:11 +00003928static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003929{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303930 struct be_resources res = {0};
Kalesh APbcc84142015-08-05 03:27:48 -04003931 u32 cap_flags, en_flags, vf;
Sathya Perla4c876612013-02-03 20:30:11 +00003932 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003933 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003934
Kalesh AP0700d812015-01-20 03:51:43 -05003935 /* If a FW profile exists, then cap_flags are updated */
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003936 cap_flags = BE_VF_IF_EN_FLAGS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003937
Sathya Perla4c876612013-02-03 20:30:11 +00003938 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303939 if (!BE3_chip(adapter)) {
3940 status = be_cmd_get_profile_config(adapter, &res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003941 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05303942 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003943 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303944 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04003945 /* Prevent VFs from enabling VLAN promiscuous
3946 * mode
3947 */
3948 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3949 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303950 }
Sathya Perla4c876612013-02-03 20:30:11 +00003951
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003952 /* PF should enable IF flags during proxy if_create call */
3953 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
Kalesh APbcc84142015-08-05 03:27:48 -04003954 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3955 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003956 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003957 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003958 }
Kalesh AP0700d812015-01-20 03:51:43 -05003959
3960 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003961}
3962
Sathya Perla39f1d942012-05-08 19:41:24 +00003963static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003964{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003965 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003966 int vf;
3967
Sathya Perla39f1d942012-05-08 19:41:24 +00003968 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3969 GFP_KERNEL);
3970 if (!adapter->vf_cfg)
3971 return -ENOMEM;
3972
Sathya Perla11ac75e2011-12-13 00:58:50 +00003973 for_all_vfs(adapter, vf_cfg, vf) {
3974 vf_cfg->if_handle = -1;
3975 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003976 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003977 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003978}
3979
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003980static int be_vf_setup(struct be_adapter *adapter)
3981{
Sathya Perla4c876612013-02-03 20:30:11 +00003982 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303983 struct be_vf_cfg *vf_cfg;
3984 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04003985 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003986
Sathya Perla257a3fe2013-06-14 15:54:51 +05303987 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003988
3989 status = be_vf_setup_init(adapter);
3990 if (status)
3991 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003992
Sathya Perla4c876612013-02-03 20:30:11 +00003993 if (old_vfs) {
3994 for_all_vfs(adapter, vf_cfg, vf) {
3995 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3996 if (status)
3997 goto err;
3998 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003999
Sathya Perla4c876612013-02-03 20:30:11 +00004000 status = be_vfs_mac_query(adapter);
4001 if (status)
4002 goto err;
4003 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05304004 status = be_vfs_if_create(adapter);
4005 if (status)
4006 goto err;
4007
Sathya Perla39f1d942012-05-08 19:41:24 +00004008 status = be_vf_eth_addr_config(adapter);
4009 if (status)
4010 goto err;
4011 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004012
Sathya Perla11ac75e2011-12-13 00:58:50 +00004013 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05304014 /* Allow VFs to programs MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04004015 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4016 vf + 1);
4017 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05304018 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04004019 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05304020 BE_PRIV_FILTMGMT,
4021 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004022 if (!status) {
4023 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05304024 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4025 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004026 }
Sathya Perla04a06022013-07-23 15:25:00 +05304027 }
4028
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304029 /* Allow full available bandwidth */
4030 if (!old_vfs)
4031 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00004032
Kalesh APe7bcbd72015-05-06 05:30:32 -04004033 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4034 vf_cfg->if_handle, NULL,
4035 &spoofchk);
4036 if (!status)
4037 vf_cfg->spoofchk = spoofchk;
4038
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304039 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05304040 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304041 be_cmd_set_logical_link_config(adapter,
4042 IFLA_VF_LINK_STATE_AUTO,
4043 vf+1);
4044 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004045 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00004046
4047 if (!old_vfs) {
4048 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4049 if (status) {
4050 dev_err(dev, "SRIOV enable failed\n");
4051 adapter->num_vfs = 0;
4052 goto err;
4053 }
4054 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304055
4056 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004057 return 0;
4058err:
Sathya Perla4c876612013-02-03 20:30:11 +00004059 dev_err(dev, "VF setup failed\n");
4060 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004061 return status;
4062}
4063
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304064/* Converting function_mode bits on BE3 to SH mc_type enums */
4065
4066static u8 be_convert_mc_type(u32 function_mode)
4067{
Suresh Reddy66064db2014-06-23 16:41:29 +05304068 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304069 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304070 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304071 return FLEX10;
4072 else if (function_mode & VNIC_MODE)
4073 return vNIC2;
4074 else if (function_mode & UMC_ENABLED)
4075 return UMC;
4076 else
4077 return MC_NONE;
4078}
4079
Sathya Perla92bf14a2013-08-27 16:57:32 +05304080/* On BE2/BE3 FW does not suggest the supported limits */
4081static void BEx_get_resources(struct be_adapter *adapter,
4082 struct be_resources *res)
4083{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304084 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304085
4086 if (be_physfn(adapter))
4087 res->max_uc_mac = BE_UC_PMAC_COUNT;
4088 else
4089 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4090
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304091 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4092
4093 if (be_is_mc(adapter)) {
4094 /* Assuming that there are 4 channels per port,
4095 * when multi-channel is enabled
4096 */
4097 if (be_is_qnq_mode(adapter))
4098 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4099 else
4100 /* In a non-qnq multichannel mode, the pvid
4101 * takes up one vlan entry
4102 */
4103 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4104 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304105 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304106 }
4107
Sathya Perla92bf14a2013-08-27 16:57:32 +05304108 res->max_mcast_mac = BE_MAX_MC;
4109
Vasundhara Volama5243da2014-03-11 18:53:07 +05304110 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4111 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4112 * *only* if it is RSS-capable.
4113 */
4114 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004115 be_virtfn(adapter) ||
4116 (be_is_mc(adapter) &&
4117 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304118 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304119 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4120 struct be_resources super_nic_res = {0};
4121
4122 /* On a SuperNIC profile, the driver needs to use the
4123 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4124 */
Vasundhara Volamf2858732015-03-04 00:44:33 -05004125 be_cmd_get_profile_config(adapter, &super_nic_res,
4126 RESOURCE_LIMITS, 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304127 /* Some old versions of BE3 FW don't report max_tx_qs value */
4128 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4129 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304130 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304131 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304132
4133 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4134 !use_sriov && be_physfn(adapter))
4135 res->max_rss_qs = (adapter->be3_native) ?
4136 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4137 res->max_rx_qs = res->max_rss_qs + 1;
4138
Suresh Reddye3dc8672014-01-06 13:02:25 +05304139 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304140 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304141 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4142 else
4143 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304144
4145 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004146 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304147 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4148 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4149}
4150
Sathya Perla30128032011-11-10 19:17:57 +00004151static void be_setup_init(struct be_adapter *adapter)
4152{
4153 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004154 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004155 adapter->if_handle = -1;
4156 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004157 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304158 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004159 if (be_physfn(adapter))
4160 adapter->cmd_privileges = MAX_PRIVILEGES;
4161 else
4162 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004163}
4164
Vasundhara Volambec84e62014-06-30 13:01:32 +05304165static int be_get_sriov_config(struct be_adapter *adapter)
4166{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304167 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304168 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304169
Vasundhara Volamf2858732015-03-04 00:44:33 -05004170 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304171
Vasundhara Volamace40af2015-03-04 00:44:34 -05004172 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304173 if (BE3_chip(adapter) && !res.max_vfs) {
4174 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4175 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4176 }
4177
Sathya Perlad3d18312014-08-01 17:47:30 +05304178 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304179
Vasundhara Volamace40af2015-03-04 00:44:34 -05004180 /* If during previous unload of the driver, the VFs were not disabled,
4181 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4182 * Instead use the TotalVFs value stored in the pci-dev struct.
4183 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304184 old_vfs = pci_num_vf(adapter->pdev);
4185 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004186 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4187 old_vfs);
4188
4189 adapter->pool_res.max_vfs =
4190 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304191 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304192 }
4193
4194 return 0;
4195}
4196
Vasundhara Volamace40af2015-03-04 00:44:34 -05004197static void be_alloc_sriov_res(struct be_adapter *adapter)
4198{
4199 int old_vfs = pci_num_vf(adapter->pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004200 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05004201 int status;
4202
4203 be_get_sriov_config(adapter);
4204
4205 if (!old_vfs)
4206 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4207
4208 /* When the HW is in SRIOV capable configuration, the PF-pool
4209 * resources are given to PF during driver load, if there are no
4210 * old VFs. This facility is not available in BE3 FW.
4211 * Also, this is done by FW in Lancer chip.
4212 */
4213 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004214 be_calculate_vf_res(adapter, 0, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004215 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004216 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004217 if (status)
4218 dev_err(&adapter->pdev->dev,
4219 "Failed to optimize SRIOV resources\n");
4220 }
4221}
4222
Sathya Perla92bf14a2013-08-27 16:57:32 +05304223static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004224{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304225 struct device *dev = &adapter->pdev->dev;
4226 struct be_resources res = {0};
4227 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004228
Sathya Perla92bf14a2013-08-27 16:57:32 +05304229 if (BEx_chip(adapter)) {
4230 BEx_get_resources(adapter, &res);
4231 adapter->res = res;
4232 }
4233
Sathya Perla92bf14a2013-08-27 16:57:32 +05304234 /* For Lancer, SH etc read per-function resource limits from FW.
4235 * GET_FUNC_CONFIG returns per function guaranteed limits.
4236 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
4237 */
Sathya Perla4c876612013-02-03 20:30:11 +00004238 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304239 status = be_cmd_get_func_config(adapter, &res);
4240 if (status)
4241 return status;
4242
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004243 /* If a deafault RXQ must be created, we'll use up one RSSQ*/
4244 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4245 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4246 res.max_rss_qs -= 1;
4247
Sathya Perla92bf14a2013-08-27 16:57:32 +05304248 /* If RoCE may be enabled stash away half the EQs for RoCE */
4249 if (be_roce_supported(adapter))
4250 res.max_evt_qs /= 2;
4251 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004252 }
4253
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004254 /* If FW supports RSS default queue, then skip creating non-RSS
4255 * queue for non-IP traffic.
4256 */
4257 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4258 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4259
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304260 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4261 be_max_txqs(adapter), be_max_rxqs(adapter),
4262 be_max_rss(adapter), be_max_eqs(adapter),
4263 be_max_vfs(adapter));
4264 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4265 be_max_uc(adapter), be_max_mc(adapter),
4266 be_max_vlans(adapter));
4267
Vasundhara Volamace40af2015-03-04 00:44:34 -05004268 /* Sanitize cfg_num_qs based on HW and platform limits */
4269 adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
4270 be_max_qs(adapter));
Sathya Perla92bf14a2013-08-27 16:57:32 +05304271 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004272}
4273
Sathya Perla39f1d942012-05-08 19:41:24 +00004274static int be_get_config(struct be_adapter *adapter)
4275{
Sathya Perla6b085ba2015-02-23 04:20:09 -05004276 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05304277 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05004278
Suresh Reddy980df242015-12-30 01:29:03 -05004279 status = be_cmd_get_cntl_attributes(adapter);
4280 if (status)
4281 return status;
4282
Kalesh APe97e3cd2014-07-17 16:20:26 +05304283 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004284 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304285 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004286
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05004287 if (!lancer_chip(adapter) && be_physfn(adapter))
4288 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4289
Sathya Perla6b085ba2015-02-23 04:20:09 -05004290 if (BEx_chip(adapter)) {
4291 level = be_cmd_get_fw_log_level(adapter);
4292 adapter->msg_enable =
4293 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4294 }
4295
4296 be_cmd_get_acpi_wol_cap(adapter);
4297
Vasundhara Volam21252372015-02-06 08:18:42 -05004298 be_cmd_query_port_name(adapter);
4299
4300 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05304301 status = be_cmd_get_active_profile(adapter, &profile_id);
4302 if (!status)
4303 dev_info(&adapter->pdev->dev,
4304 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05304305 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304306
Sathya Perla92bf14a2013-08-27 16:57:32 +05304307 status = be_get_resources(adapter);
4308 if (status)
4309 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004310
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05304311 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4312 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05304313 if (!adapter->pmac_id)
4314 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004315
Sathya Perla92bf14a2013-08-27 16:57:32 +05304316 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00004317}
4318
Sathya Perla95046b92013-07-23 15:25:02 +05304319static int be_mac_setup(struct be_adapter *adapter)
4320{
4321 u8 mac[ETH_ALEN];
4322 int status;
4323
4324 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4325 status = be_cmd_get_perm_mac(adapter, mac);
4326 if (status)
4327 return status;
4328
4329 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4330 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304331 }
4332
Sathya Perla95046b92013-07-23 15:25:02 +05304333 return 0;
4334}
4335
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304336static void be_schedule_worker(struct be_adapter *adapter)
4337{
4338 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4339 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4340}
4341
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304342static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004343{
4344 schedule_delayed_work(&adapter->be_err_detection_work,
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304345 msecs_to_jiffies(delay));
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004346 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4347}
4348
Sathya Perla77071332013-08-27 16:57:34 +05304349static int be_setup_queues(struct be_adapter *adapter)
4350{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304351 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304352 int status;
4353
4354 status = be_evt_queues_create(adapter);
4355 if (status)
4356 goto err;
4357
4358 status = be_tx_qs_create(adapter);
4359 if (status)
4360 goto err;
4361
4362 status = be_rx_cqs_create(adapter);
4363 if (status)
4364 goto err;
4365
4366 status = be_mcc_queues_create(adapter);
4367 if (status)
4368 goto err;
4369
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304370 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4371 if (status)
4372 goto err;
4373
4374 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4375 if (status)
4376 goto err;
4377
Sathya Perla77071332013-08-27 16:57:34 +05304378 return 0;
4379err:
4380 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4381 return status;
4382}
4383
Ajit Khaparde62219062016-02-10 22:45:53 +05304384static int be_if_create(struct be_adapter *adapter)
4385{
4386 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4387 u32 cap_flags = be_if_cap_flags(adapter);
4388 int status;
4389
4390 if (adapter->cfg_num_qs == 1)
4391 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4392
4393 en_flags &= cap_flags;
4394 /* will enable all the needed filter flags in be_open() */
4395 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4396 &adapter->if_handle, 0);
4397
4398 return status;
4399}
4400
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304401int be_update_queues(struct be_adapter *adapter)
4402{
4403 struct net_device *netdev = adapter->netdev;
4404 int status;
4405
4406 if (netif_running(netdev))
4407 be_close(netdev);
4408
4409 be_cancel_worker(adapter);
4410
4411 /* If any vectors have been shared with RoCE we cannot re-program
4412 * the MSIx table.
4413 */
4414 if (!adapter->num_msix_roce_vec)
4415 be_msix_disable(adapter);
4416
4417 be_clear_queues(adapter);
Ajit Khaparde62219062016-02-10 22:45:53 +05304418 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4419 if (status)
4420 return status;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304421
4422 if (!msix_enabled(adapter)) {
4423 status = be_msix_enable(adapter);
4424 if (status)
4425 return status;
4426 }
4427
Ajit Khaparde62219062016-02-10 22:45:53 +05304428 status = be_if_create(adapter);
4429 if (status)
4430 return status;
4431
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304432 status = be_setup_queues(adapter);
4433 if (status)
4434 return status;
4435
4436 be_schedule_worker(adapter);
4437
4438 if (netif_running(netdev))
4439 status = be_open(netdev);
4440
4441 return status;
4442}
4443
Sathya Perlaf7062ee2015-02-06 08:18:35 -05004444static inline int fw_major_num(const char *fw_ver)
4445{
4446 int fw_major = 0, i;
4447
4448 i = sscanf(fw_ver, "%d.", &fw_major);
4449 if (i != 1)
4450 return 0;
4451
4452 return fw_major;
4453}
4454
Sathya Perlaf962f842015-02-23 04:20:16 -05004455/* If any VFs are already enabled don't FLR the PF */
4456static bool be_reset_required(struct be_adapter *adapter)
4457{
4458 return pci_num_vf(adapter->pdev) ? false : true;
4459}
4460
/* Wait for the FW to be ready and perform the required initialization:
 * optionally issue a function-level reset (skipped when VFs are already
 * enabled), clear recorded error state, announce readiness to FW and
 * enable interrupts for other ULPs. Returns 0 or a FW error status.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4492
Sathya Perla5fb379e2009-06-18 00:02:59 +00004493static int be_setup(struct be_adapter *adapter)
4494{
Sathya Perla39f1d942012-05-08 19:41:24 +00004495 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004496 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004497
Sathya Perlaf962f842015-02-23 04:20:16 -05004498 status = be_func_init(adapter);
4499 if (status)
4500 return status;
4501
Sathya Perla30128032011-11-10 19:17:57 +00004502 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004503
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004504 if (!lancer_chip(adapter))
4505 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004506
Suresh Reddy980df242015-12-30 01:29:03 -05004507 /* invoke this cmd first to get pf_num and vf_num which are needed
4508 * for issuing profile related cmds
4509 */
4510 if (!BEx_chip(adapter)) {
4511 status = be_cmd_get_func_config(adapter, NULL);
4512 if (status)
4513 return status;
4514 }
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004515
Vasundhara Volamace40af2015-03-04 00:44:34 -05004516 if (!BE2_chip(adapter) && be_physfn(adapter))
4517 be_alloc_sriov_res(adapter);
4518
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004519 status = be_get_config(adapter);
4520 if (status)
4521 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004522
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004523 status = be_msix_enable(adapter);
4524 if (status)
4525 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004526
Kalesh APbcc84142015-08-05 03:27:48 -04004527 /* will enable all the needed filter flags in be_open() */
Ajit Khaparde62219062016-02-10 22:45:53 +05304528 status = be_if_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004529 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004530 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004531
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304532 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4533 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304534 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304535 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004536 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004537 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004538
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004539 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004540
Sathya Perla95046b92013-07-23 15:25:02 +05304541 status = be_mac_setup(adapter);
4542 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004543 goto err;
4544
Kalesh APe97e3cd2014-07-17 16:20:26 +05304545 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304546 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004547
Somnath Koture9e2a902013-10-24 14:37:53 +05304548 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304549 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304550 adapter->fw_ver);
4551 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4552 }
4553
Kalesh AP00d594c2015-01-20 03:51:44 -05004554 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4555 adapter->rx_fc);
4556 if (status)
4557 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4558 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004559
Kalesh AP00d594c2015-01-20 03:51:44 -05004560 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4561 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004562
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304563 if (be_physfn(adapter))
4564 be_cmd_set_logical_link_config(adapter,
4565 IFLA_VF_LINK_STATE_AUTO, 0);
4566
Vasundhara Volambec84e62014-06-30 13:01:32 +05304567 if (adapter->num_vfs)
4568 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004569
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004570 status = be_cmd_get_phy_info(adapter);
4571 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004572 adapter->phy.fc_autoneg = 1;
4573
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304574 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304575 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004576 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004577err:
4578 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004579 return status;
4580}
4581
Ivan Vecera66268732011-12-08 01:31:21 +00004582#ifdef CONFIG_NET_POLL_CONTROLLER
4583static void be_netpoll(struct net_device *netdev)
4584{
4585 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004586 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004587 int i;
4588
Sathya Perlae49cc342012-11-27 19:50:02 +00004589 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04004590 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perlae49cc342012-11-27 19:50:02 +00004591 napi_schedule(&eqo->napi);
4592 }
Ivan Vecera66268732011-12-08 01:31:21 +00004593}
4594#endif
4595
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004596int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4597{
4598 const struct firmware *fw;
4599 int status;
4600
4601 if (!netif_running(adapter->netdev)) {
4602 dev_err(&adapter->pdev->dev,
4603 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304604 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004605 }
4606
4607 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4608 if (status)
4609 goto fw_exit;
4610
4611 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4612
4613 if (lancer_chip(adapter))
4614 status = lancer_fw_download(adapter, fw);
4615 else
4616 status = be_fw_download(adapter, fw);
4617
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004618 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304619 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004620
Ajit Khaparde84517482009-09-04 03:12:16 +00004621fw_exit:
4622 release_firmware(fw);
4623 return status;
4624}
4625
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004626static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4627 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004628{
4629 struct be_adapter *adapter = netdev_priv(dev);
4630 struct nlattr *attr, *br_spec;
4631 int rem;
4632 int status = 0;
4633 u16 mode = 0;
4634
4635 if (!sriov_enabled(adapter))
4636 return -EOPNOTSUPP;
4637
4638 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004639 if (!br_spec)
4640 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004641
4642 nla_for_each_nested(attr, br_spec, rem) {
4643 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4644 continue;
4645
Thomas Grafb7c1a312014-11-26 13:42:17 +01004646 if (nla_len(attr) < sizeof(mode))
4647 return -EINVAL;
4648
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004649 mode = nla_get_u16(attr);
Suresh Reddyac0f5fb2015-12-30 01:28:57 -05004650 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4651 return -EOPNOTSUPP;
4652
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004653 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4654 return -EINVAL;
4655
4656 status = be_cmd_set_hsw_config(adapter, 0, 0,
4657 adapter->if_handle,
4658 mode == BRIDGE_MODE_VEPA ?
4659 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04004660 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004661 if (status)
4662 goto err;
4663
4664 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4665 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4666
4667 return status;
4668 }
4669err:
4670 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4671 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4672
4673 return status;
4674}
4675
/* ndo_bridge_getlink: report the embedded bridge mode (VEB/VEPA) through
 * the default netlink helper. Returns 0 (nothing reported) when the mode
 * is unavailable or not applicable.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: ask FW for the current forwarding mode */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
4706
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304707#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004708/* VxLAN offload Notes:
4709 *
4710 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4711 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4712 * is expected to work across all types of IP tunnels once exported. Skyhawk
4713 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304714 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4715 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4716 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004717 *
4718 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4719 * adds more than one port, disable offloads and don't re-enable them again
4720 * until after all the tunnels are removed.
4721 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304722static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4723 __be16 port)
4724{
4725 struct be_adapter *adapter = netdev_priv(netdev);
4726 struct device *dev = &adapter->pdev->dev;
4727 int status;
4728
Ivan Veceraaf19e682015-08-14 22:30:01 +02004729 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
Sathya Perlac9c47142014-03-27 10:46:19 +05304730 return;
4731
Jiri Benc1e5b3112015-09-17 16:11:13 +02004732 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4733 adapter->vxlan_port_aliases++;
4734 return;
4735 }
4736
Sathya Perlac9c47142014-03-27 10:46:19 +05304737 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304738 dev_info(dev,
4739 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004740 dev_info(dev, "Disabling VxLAN offloads\n");
4741 adapter->vxlan_port_count++;
4742 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304743 }
4744
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004745 if (adapter->vxlan_port_count++ >= 1)
4746 return;
4747
Sathya Perlac9c47142014-03-27 10:46:19 +05304748 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4749 OP_CONVERT_NORMAL_TO_TUNNEL);
4750 if (status) {
4751 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4752 goto err;
4753 }
4754
4755 status = be_cmd_set_vxlan_port(adapter, port);
4756 if (status) {
4757 dev_warn(dev, "Failed to add VxLAN port\n");
4758 goto err;
4759 }
4760 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4761 adapter->vxlan_port = port;
4762
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004763 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4764 NETIF_F_TSO | NETIF_F_TSO6 |
4765 NETIF_F_GSO_UDP_TUNNEL;
4766 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304767 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004768
Sathya Perlac9c47142014-03-27 10:46:19 +05304769 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4770 be16_to_cpu(port));
4771 return;
4772err:
4773 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304774}
4775
/* ndo_del_vxlan_port callback: invoked when a VxLAN UDP port is closed.
 * Drops an alias reference first; only when the last reference to the
 * offloaded port goes away are HW offloads actually disabled.
 */
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* Not the offloaded port: just decrement the outstanding-port count */
	if (adapter->vxlan_port != port)
		goto done;

	/* Another user of the same port still exists: keep offloads on */
	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
Joe Stringer725d5482014-11-13 16:38:13 -08004800
/* ndo_features_check callback: strips checksum/GSO offload features from
 * tunneled packets that the HW cannot offload, so the stack falls back to
 * software. Non-encapsulated traffic is never restricted here.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* Keep offloads only for a genuine VxLAN frame: UDP transport,
	 * inner Ethernet payload, and exactly a UDP+VxLAN header between
	 * the transport and inner MAC headers.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304841#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304842
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05304843static int be_get_phys_port_id(struct net_device *dev,
4844 struct netdev_phys_item_id *ppid)
4845{
4846 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4847 struct be_adapter *adapter = netdev_priv(dev);
4848 u8 *id;
4849
4850 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4851 return -ENOSPC;
4852
4853 ppid->id[0] = adapter->hba_port_num + 1;
4854 id = &ppid->id[1];
4855 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4856 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4857 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4858
4859 ppid->id_len = id_len;
4860
4861 return 0;
4862}
4863
/* net_device_ops vtable for the be2net driver; hooked up in be_netdev_init().
 * SR-IOV (ndo_set_vf_*), bridge, busy-poll and VxLAN callbacks are included
 * as supported by the respective config options.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_features_check	= be_features_check,
#endif
	.ndo_get_phys_port_id   = be_get_phys_port_id,
};
4896
/* One-time net_device setup called from be_probe(): advertises offload
 * features, wires in the netdev and ethtool ops. Note the ordering:
 * hw_features is accumulated first (including the conditional RXHASH),
 * then folded into the active features mask.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX flow hashing is only advertised when the interface has RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so the frame (payload + L2 header) fits the HW limit */
	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4923
/* Quiesce the device: detach and close the netdev under RTNL so the stack
 * stops using it, then tear down rings/queues via be_clear(). Counterpart
 * of be_resume(); used by suspend, EEH and error recovery paths.
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
4936
/* Re-initialize the adapter after be_cleanup(): recreate resources with
 * be_setup(), reopen the interface under RTNL if it was running, and
 * re-attach the netdev. Returns 0 on success or a negative errno.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
4958
/* Attempt to recover the adapter from a HW error: wait for the firmware
 * to become ready, tear everything down and bring it back up.
 * Returns 0 on success, -EIO on non-Lancer chips, or the failing step's
 * error code.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	/* Error recovery is supported only on Lancer as of now */
	if (!lancer_chip(adapter))
		return -EIO;

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	return 0;
err:
	return status;
}
4984
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004985static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004986{
4987 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004988 container_of(work, struct be_adapter,
4989 be_err_detection_work.work);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304990 struct device *dev = &adapter->pdev->dev;
4991 int recovery_status;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304992 int delay = ERR_DETECTION_DELAY;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004993
4994 be_detect_error(adapter);
4995
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304996 if (be_check_error(adapter, BE_ERROR_HW))
4997 recovery_status = be_err_recover(adapter);
4998 else
4999 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05005000
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305001 if (!recovery_status) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305002 adapter->recovery_retries = 0;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305003 dev_info(dev, "Adapter recovery successful\n");
5004 goto reschedule_task;
5005 } else if (be_virtfn(adapter)) {
5006 /* For VFs, check if PF have allocated resources
5007 * every second.
5008 */
5009 dev_err(dev, "Re-trying adapter recovery\n");
5010 goto reschedule_task;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305011 } else if (adapter->recovery_retries++ <
5012 MAX_ERR_RECOVERY_RETRY_COUNT) {
5013 /* In case of another error during recovery, it takes 30 sec
5014 * for adapter to come out of error. Retry error recovery after
5015 * this time interval.
5016 */
5017 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5018 delay = ERR_RECOVERY_RETRY_DELAY;
5019 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305020 } else {
5021 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005022 }
5023
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305024 return;
5025reschedule_task:
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305026 be_schedule_err_detection(adapter, delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005027}
5028
Vasundhara Volam21252372015-02-06 08:18:42 -05005029static void be_log_sfp_info(struct be_adapter *adapter)
5030{
5031 int status;
5032
5033 status = be_cmd_query_sfp_info(adapter);
5034 if (!status) {
5035 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305036 "Port %c: %s Vendor: %s part no: %s",
5037 adapter->port_name,
5038 be_misconfig_evt_port_state[adapter->phy_state],
5039 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005040 adapter->phy.vendor_pn);
5041 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305042 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005043}
5044
/* Housekeeping worker, self-rescheduled every second: reaps MCC
 * completions while interrupts are off, refreshes HW statistics and die
 * temperature, replenishes starved RX queues, updates EQ delay, and logs
 * SFP info after a PHY-misconfiguration event.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Kick off a stats query only if the previous one has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die temperature is polled less often (every be_get_temp_freq
	 * ticks) and only on the PF.
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5093
Sathya Perla78fad34e2015-02-23 04:20:08 -05005094static void be_unmap_pci_bars(struct be_adapter *adapter)
5095{
5096 if (adapter->csr)
5097 pci_iounmap(adapter->pdev, adapter->csr);
5098 if (adapter->db)
5099 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005100 if (adapter->pcicfg && adapter->pcicfg_mapped)
5101 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005102}
5103
/* Return the PCI BAR index holding the doorbell region:
 * BAR 0 on Lancer chips and on VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5111
5112static int be_roce_map_pci_bars(struct be_adapter *adapter)
5113{
5114 if (skyhawk_chip(adapter)) {
5115 adapter->roce_db.size = 4096;
5116 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5117 db_bar(adapter));
5118 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5119 db_bar(adapter));
5120 }
5121 return 0;
5122}
5123
/* Map the PCI BARs needed by the driver: CSR (BEx PF only), doorbell, and
 * PCICFG (Skyhawk/BEx; iomapped on the PF, an offset into the db mapping
 * on VFs). Also derives sli_family/virtfn from the SLI_INTF register and
 * records the RoCE doorbell window. Returns 0 or -ENOMEM, unmapping any
 * partial mappings on failure.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			/* VFs reach PCICFG through the doorbell mapping */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5168
5169static void be_drv_cleanup(struct be_adapter *adapter)
5170{
5171 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5172 struct device *dev = &adapter->pdev->dev;
5173
5174 if (mem->va)
5175 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5176
5177 mem = &adapter->rx_filter;
5178 if (mem->va)
5179 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5180
5181 mem = &adapter->stats_cmd;
5182 if (mem->va)
5183 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5184}
5185
/* Allocate and initialize various fields in be_adapter struct:
 * DMA-coherent buffers for the mailbox (16-byte aligned), RX filter and
 * stats commands, plus locks, completions and the delayed-work items.
 * Returns 0 on success or -ENOMEM, freeing earlier allocations via the
 * goto cleanup chain on failure.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on the chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5256
/* PCI remove callback: tears down the adapter in the reverse order of
 * be_probe() — RoCE first, then interrupts, error-detection work, the
 * netdev, HW resources, firmware session, BAR mappings and finally the
 * PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5286
Arnd Bergmann9a032592015-05-18 23:06:45 +02005287static ssize_t be_hwmon_show_temp(struct device *dev,
5288 struct device_attribute *dev_attr,
5289 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305290{
5291 struct be_adapter *adapter = dev_get_drvdata(dev);
5292
5293 /* Unit: millidegree Celsius */
5294 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5295 return -EIO;
5296 else
5297 return sprintf(buf, "%u\n",
5298 adapter->hwmon_info.be_on_die_temp * 1000);
5299}
5300
/* hwmon sysfs plumbing: a single read-only temp1_input attribute backed
 * by be_hwmon_show_temp(); ATTRIBUTE_GROUPS() generates be_hwmon_groups
 * for devm_hwmon_device_register_with_groups() in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5310
Sathya Perlad3791422012-09-28 04:39:44 +00005311static char *mc_name(struct be_adapter *adapter)
5312{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305313 char *str = ""; /* default */
5314
5315 switch (adapter->mc_type) {
5316 case UMC:
5317 str = "UMC";
5318 break;
5319 case FLEX10:
5320 str = "FLEX10";
5321 break;
5322 case vNIC1:
5323 str = "vNIC-1";
5324 break;
5325 case nPAR:
5326 str = "nPAR";
5327 break;
5328 case UFP:
5329 str = "UFP";
5330 break;
5331 case vNIC2:
5332 str = "vNIC-2";
5333 break;
5334 default:
5335 str = "";
5336 }
5337
5338 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005339}
5340
/* Return "PF" for a physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5345
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005346static inline char *nic_name(struct pci_dev *pdev)
5347{
5348 switch (pdev->device) {
5349 case OC_DEVICE_ID1:
5350 return OC_NAME;
5351 case OC_DEVICE_ID2:
5352 return OC_NAME_BE;
5353 case OC_DEVICE_ID3:
5354 case OC_DEVICE_ID4:
5355 return OC_NAME_LANCER;
5356 case BE_DEVICE_ID2:
5357 return BE3_NAME;
5358 case OC_DEVICE_ID5:
5359 case OC_DEVICE_ID6:
5360 return OC_NAME_SH;
5361 default:
5362 return BE_NAME;
5363 }
5364}
5365
/* PCI probe callback: enables the device, sets the DMA mask (64-bit with
 * a 32-bit fallback), maps BARs, allocates driver resources, brings up
 * the HW via be_setup(), registers the netdev, and starts the RoCE,
 * error-detection and (PF-only) hwmon facilities. Each failure unwinds
 * exactly the steps completed so far via the goto ladder at the bottom.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit when unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it can't be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5461
/* Legacy PCI suspend callback: arm wake-on-LAN if enabled, mask
 * interrupts, stop the error-detection worker, quiesce the device, then
 * save PCI state and drop to the requested power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5479
/* Legacy PCI resume callback: re-enable the device, restore PCI config
 * space, bring the adapter back up, restart error detection and disarm
 * wake-on-LAN. Returns 0 or the failing step's error code.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5502
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to do if probe never completed (drvdata not set) */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Flush pending workers before resetting the function so they
	 * do not run against a dead device.
	 */
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset: guarantees the HW stops all DMA */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5523
/* EEH/AER callback: a PCI channel error was detected on this function.
 * Quiesce the adapter and tell the PCI error-recovery core whether to
 * proceed with a slot reset or give up on the device.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* Quiesce only once even if multiple error events arrive */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	/* Permanent failure: no reset will help; tell the core to
	 * disconnect the device.
	 */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5557
/* EEH/AER callback: the slot has been reset. Re-enable the function,
 * wait for firmware to become ready, and report whether recovery can
 * continue (be_eeh_resume() runs next on success).
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear the sticky error flags set in be_eeh_err_detected() */
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5583
5584static void be_eeh_resume(struct pci_dev *pdev)
5585{
5586 int status = 0;
5587 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005588
5589 dev_info(&adapter->pdev->dev, "EEH resume\n");
5590
5591 pci_save_state(pdev);
5592
Kalesh AP484d76f2015-02-23 04:20:14 -05005593 status = be_resume(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005594 if (status)
5595 goto err;
5596
Padmanabh Ratnakar68f22792016-02-18 03:09:34 +05305597 be_roce_dev_add(adapter);
5598
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305599 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
Sathya Perlacf588472010-02-14 21:22:01 +00005600 return;
5601err:
5602 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005603}
5604
Vasundhara Volamace40af2015-03-04 00:44:34 -05005605static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5606{
5607 struct be_adapter *adapter = pci_get_drvdata(pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005608 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05005609 int status;
5610
5611 if (!num_vfs)
5612 be_vf_clear(adapter);
5613
5614 adapter->num_vfs = num_vfs;
5615
5616 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5617 dev_warn(&pdev->dev,
5618 "Cannot disable VFs while they are assigned\n");
5619 return -EBUSY;
5620 }
5621
5622 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5623 * are equally distributed across the max-number of VFs. The user may
5624 * request only a subset of the max-vfs to be enabled.
5625 * Based on num_vfs, redistribute the resources across num_vfs so that
5626 * each VF will have access to more number of resources.
5627 * This facility is not available in BE3 FW.
5628 * Also, this is done by FW in Lancer chip.
5629 */
5630 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005631 be_calculate_vf_res(adapter, adapter->num_vfs,
5632 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05005633 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005634 adapter->num_vfs, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05005635 if (status)
5636 dev_err(&pdev->dev,
5637 "Failed to optimize SR-IOV resources\n");
5638 }
5639
5640 status = be_get_resources(adapter);
5641 if (status)
5642 return be_cmd_status(status);
5643
5644 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5645 rtnl_lock();
5646 status = be_update_queues(adapter);
5647 rtnl_unlock();
5648 if (status)
5649 return be_cmd_status(status);
5650
5651 if (adapter->num_vfs)
5652 status = be_vf_setup(adapter);
5653
5654 if (!status)
5655 return adapter->num_vfs;
5656
5657 return 0;
5658}
5659
/* PCI error-recovery (EEH/AER) entry points, invoked in this order by
 * the PCI core: error_detected -> slot_reset -> resume.
 */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5665
/* PCI driver descriptor: probe/remove, legacy power management,
 * shutdown, sysfs-driven SR-IOV configuration and EEH error recovery.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5677
5678static int __init be_init_module(void)
5679{
Joe Perches8e95a202009-12-03 07:58:21 +00005680 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5681 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005682 printk(KERN_WARNING DRV_NAME
5683 " : Module param rx_frag_size must be 2048/4096/8192."
5684 " Using 2048\n");
5685 rx_frag_size = 2048;
5686 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005687
Vasundhara Volamace40af2015-03-04 00:44:34 -05005688 if (num_vfs > 0) {
5689 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5690 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5691 }
5692
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005693 return pci_register_driver(&be_driver);
5694}
5695module_init(be_init_module);
5696
/* Module exit point: unregister the PCI driver; the core then calls
 * be_remove() for each bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
5701module_exit(be_exit_module);