blob: a97fc8a80fac73735198355e08f94fb23a98f010 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Per-fragment RX buffer size; S_IRUGO => readable but not writable
 * via sysfs after module load.
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI device IDs claimed by this driver. Terminated by the all-zero
 * sentinel entry, as required by the PCI core.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* One human-readable name per bit of the UE (unrecoverable error)
 * status-low register; indexed by bit position when decoding errors.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
/* Bit-position names for the UE status-high register; the final
 * "Unknown" entry is the catch-all for unlisted bits.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
/* Interface-capability flags enabled for VF interfaces.
 * NOTE(review): presumably the flag set passed at VF if-create time;
 * confirm against the callers of this macro.
 */
#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
132
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
134{
135 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530136
Sathya Perla1cfafab2012-02-23 18:50:15 +0000137 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000138 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
139 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000140 mem->va = NULL;
141 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142}
143
144static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530145 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146{
147 struct be_dma_mem *mem = &q->dma_mem;
148
149 memset(q, 0, sizeof(*q));
150 q->len = len;
151 q->entry_size = entry_size;
152 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700153 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
154 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000156 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157 return 0;
158}
159
Somnath Kotur68c45a22013-03-14 02:42:07 +0000160static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161{
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163
Sathya Perladb3ea782011-08-22 19:41:52 +0000164 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530165 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000174
Sathya Perladb3ea782011-08-22 19:41:52 +0000175 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530176 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177}
178
Somnath Kotur68c45a22013-03-14 02:42:07 +0000179static void be_intr_set(struct be_adapter *adapter, bool enable)
180{
181 int status = 0;
182
183 /* On lancer interrupts can't be controlled via this register */
184 if (lancer_chip(adapter))
185 return;
186
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530187 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188 return;
189
190 status = be_cmd_intr_set(adapter, enable);
191 if (status)
192 be_reg_intr_set(adapter, enable);
193}
194
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196{
197 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530198
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530199 if (be_check_error(adapter, BE_ERROR_HW))
200 return;
201
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= qid & DB_RQ_RING_ID_MASK;
203 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000204
205 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000206 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207}
208
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
210 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211{
212 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530213
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530214 if (be_check_error(adapter, BE_ERROR_HW))
215 return;
216
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000217 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000219
220 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000221 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222}
223
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400225 bool arm, bool clear_int, u16 num_popped,
226 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227{
228 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530229
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530231 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000232
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530233 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000234 return;
235
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236 if (arm)
237 val |= 1 << DB_EQ_REARM_SHIFT;
238 if (clear_int)
239 val |= 1 << DB_EQ_CLR_SHIFT;
240 val |= 1 << DB_EQ_EVNT_SHIFT;
241 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400242 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000243 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244}
245
Sathya Perla8788fdc2009-07-27 22:52:03 +0000246void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247{
248 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530249
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000251 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
252 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000253
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530254 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000255 return;
256
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700257 if (arm)
258 val |= 1 << DB_CQ_REARM_SHIFT;
259 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000260 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700261}
262
/* ndo_set_mac_address handler.
 * Programs the new MAC into the FW (PMAC add), deletes the old PMAC,
 * then reads back the active MAC to verify the change actually took
 * effect (a VF without FILTMGMT privilege may silently be refused).
 * Returns 0 on success, -EADDRNOTAVAIL/-EPERM/FW status on failure.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
327
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328/* BE2 supports only v0 cmd */
329static void *hw_stats_from_cmd(struct be_adapter *adapter)
330{
331 if (BE2_chip(adapter)) {
332 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
333
334 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500335 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000336 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
337
338 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500339 } else {
340 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
341
342 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000343 }
344}
345
346/* BE2 supports only v0 cmd */
347static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
348{
349 if (BE2_chip(adapter)) {
350 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
351
352 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500353 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000354 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
355
356 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500357 } else {
358 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
359
360 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000361 }
362}
363
364static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000366 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
367 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
368 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000370 &rxf_stats->port[adapter->port_num];
371 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000372
Sathya Perlaac124ff2011-07-25 19:10:14 +0000373 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_pause_frames = port_stats->rx_pause_frames;
375 drvs->rx_crc_errors = port_stats->rx_crc_errors;
376 drvs->rx_control_frames = port_stats->rx_control_frames;
377 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
378 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
379 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
380 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
381 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
382 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
383 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
384 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
385 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
386 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
387 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_dropped_header_too_small =
390 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000391 drvs->rx_address_filtered =
392 port_stats->rx_address_filtered +
393 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394 drvs->rx_alignment_symbol_errors =
395 port_stats->rx_alignment_symbol_errors;
396
397 drvs->tx_pauseframes = port_stats->tx_pauseframes;
398 drvs->tx_controlframes = port_stats->tx_controlframes;
399
400 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000401 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000404 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->forwarded_packets = rxf_stats->forwarded_packets;
407 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000408 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
409 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000410 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
411}
412
Sathya Perlaca34fe32012-11-06 17:48:56 +0000413static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000415 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
416 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
417 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000418 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 &rxf_stats->port[adapter->port_num];
420 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421
Sathya Perlaac124ff2011-07-25 19:10:14 +0000422 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000423 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
424 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->rx_pause_frames = port_stats->rx_pause_frames;
426 drvs->rx_crc_errors = port_stats->rx_crc_errors;
427 drvs->rx_control_frames = port_stats->rx_control_frames;
428 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
429 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
430 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
431 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
432 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
433 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
434 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
435 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
436 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
437 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
438 drvs->rx_dropped_header_too_small =
439 port_stats->rx_dropped_header_too_small;
440 drvs->rx_input_fifo_overflow_drop =
441 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000442 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000443 drvs->rx_alignment_symbol_errors =
444 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000445 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 drvs->tx_pauseframes = port_stats->tx_pauseframes;
447 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000448 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000449 drvs->jabber_events = port_stats->jabber_events;
450 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000451 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000452 drvs->forwarded_packets = rxf_stats->forwarded_packets;
453 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000454 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
455 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000456 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
457}
458
Ajit Khaparde61000862013-10-03 16:16:33 -0500459static void populate_be_v2_stats(struct be_adapter *adapter)
460{
461 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
462 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
463 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
464 struct be_port_rxf_stats_v2 *port_stats =
465 &rxf_stats->port[adapter->port_num];
466 struct be_drv_stats *drvs = &adapter->drv_stats;
467
468 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
469 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
470 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
471 drvs->rx_pause_frames = port_stats->rx_pause_frames;
472 drvs->rx_crc_errors = port_stats->rx_crc_errors;
473 drvs->rx_control_frames = port_stats->rx_control_frames;
474 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
475 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
476 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
477 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
478 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
479 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
480 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
481 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
482 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
483 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
484 drvs->rx_dropped_header_too_small =
485 port_stats->rx_dropped_header_too_small;
486 drvs->rx_input_fifo_overflow_drop =
487 port_stats->rx_input_fifo_overflow_drop;
488 drvs->rx_address_filtered = port_stats->rx_address_filtered;
489 drvs->rx_alignment_symbol_errors =
490 port_stats->rx_alignment_symbol_errors;
491 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
492 drvs->tx_pauseframes = port_stats->tx_pauseframes;
493 drvs->tx_controlframes = port_stats->tx_controlframes;
494 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
495 drvs->jabber_events = port_stats->jabber_events;
496 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
497 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
498 drvs->forwarded_packets = rxf_stats->forwarded_packets;
499 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
500 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
501 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
502 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530503 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500504 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
505 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
506 drvs->rx_roce_frames = port_stats->roce_frames_received;
507 drvs->roce_drops_crc = port_stats->roce_drops_crc;
508 drvs->roce_drops_payload_len =
509 port_stats->roce_drops_payload_len;
510 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500511}
512
/* Copy the Lancer pport stats layout into the chip-agnostic driver
 * stats block. The *_lo fields are the low 32 bits of 64-bit HW
 * counters as laid out in the pport stats response.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* both MAC- and VLAN-filtered frames count as address-filtered */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000549
Sathya Perla09c1c682011-08-22 19:41:53 +0000550static void accumulate_16bit_val(u32 *acc, u16 val)
551{
552#define lo(x) (x & 0xFFFF)
553#define hi(x) (x & 0xFFFF0000)
554 bool wrapped = val < lo(*acc);
555 u32 newacc = hi(*acc) + val;
556
557 if (wrapped)
558 newacc += 65536;
559 ACCESS_ONCE(*acc) = newacc;
560}
561
Jingoo Han4188e7d2013-08-05 18:02:02 +0900562static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530563 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000564{
565 if (!BEx_chip(adapter))
566 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
567 else
568 /* below erx HW counter can actually wrap around after
569 * 65535. Driver accumulates a 32-bit value
570 */
571 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
572 (u16)erx_stat);
573}
574
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000575void be_parse_stats(struct be_adapter *adapter)
576{
Ajit Khaparde61000862013-10-03 16:16:33 -0500577 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000578 struct be_rx_obj *rxo;
579 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000580 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000581
Sathya Perlaca34fe32012-11-06 17:48:56 +0000582 if (lancer_chip(adapter)) {
583 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000584 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000585 if (BE2_chip(adapter))
586 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500587 else if (BE3_chip(adapter))
588 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000589 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500590 else
591 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000592
Ajit Khaparde61000862013-10-03 16:16:33 -0500593 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000594 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000595 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
596 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000597 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000598 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000599}
600
/* ndo_get_stats64 handler: aggregates the per-RX/TX-queue software
 * counters and the f/w-derived error counters (drv_stats, updated
 * elsewhere in the driver) into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		/* NB: local 'rx_stats' shadows the rx_stats() macro used
		 * inside the loop
		 */
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* u64_stats seqcount loop: retry until we read a consistent
		 * pkts/bytes snapshot not torn by a concurrent writer
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
668
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000669void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 struct net_device *netdev = adapter->netdev;
672
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000673 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000674 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000675 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000677
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530678 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000679 netif_carrier_on(netdev);
680 else
681 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200682
683 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684}
685
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500686static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687{
Sathya Perla3c8def92011-06-12 20:01:58 +0000688 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530689 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000690
Sathya Perlaab1594e2011-07-25 19:10:15 +0000691 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000692 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500693 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530694 stats->tx_pkts += tx_pkts;
695 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
696 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000697 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698}
699
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500700/* Returns number of WRBs needed for the skb */
701static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500703 /* +1 for the header wrb */
704 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705}
706
707static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
708{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500709 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
710 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
711 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
712 wrb->rsvd0 = 0;
713}
714
715/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
716 * to avoid the swap and shift/mask operations in wrb_fill().
717 */
718static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
719{
720 wrb->frag_pa_hi = 0;
721 wrb->frag_pa_lo = 0;
722 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000723 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700724}
725
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000726static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530727 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000728{
729 u8 vlan_prio;
730 u16 vlan_tag;
731
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100732 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000733 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
734 /* If vlan priority provided by OS is NOT in available bmap */
735 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
736 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500737 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000738
739 return vlan_tag;
740}
741
Sathya Perlac9c47142014-03-27 10:46:19 +0530742/* Used only for IP tunnel packets */
743static u16 skb_inner_ip_proto(struct sk_buff *skb)
744{
745 return (inner_ip_hdr(skb)->version == 4) ?
746 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
747}
748
749static u16 skb_ip_proto(struct sk_buff *skb)
750{
751 return (ip_hdr(skb)->version == 4) ?
752 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
753}
754
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530755static inline bool be_is_txq_full(struct be_tx_obj *txo)
756{
757 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
758}
759
760static inline bool be_can_txq_wake(struct be_tx_obj *txo)
761{
762 return atomic_read(&txo->q.used) < txo->q.len / 2;
763}
764
765static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
766{
767 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
768}
769
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530770static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
771 struct sk_buff *skb,
772 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530774 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700775
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000776 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, LSO, 1);
778 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000779 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530780 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530782 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530784 proto = skb_inner_ip_proto(skb);
785 } else {
786 proto = skb_ip_proto(skb);
787 }
788 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530789 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530790 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530791 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700792 }
793
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100794 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530795 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
796 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700797 }
798
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530799 BE_WRB_F_SET(wrb_params->features, CRC, 1);
800}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500801
/* Translate the s/w wrb_params gathered for this skb into the h/w eth
 * hdr wrb format. The hdr wrb is the first wrb of the packet and is
 * followed by the frag wrbs (see be_xmit_enqueue()).
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum offload requests */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* segmentation offload (LSO/LSO6) and the MSS to segment with */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* total wrb count (hdr + frags) and total pkt length */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes a copy of the pkt to the BMC (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
838
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000839static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530840 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000841{
842 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500843 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000844
Sathya Perla7101e112010-03-22 20:41:12 +0000845
Sathya Perlaf986afc2015-02-06 08:18:43 -0500846 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
847 (u64)le32_to_cpu(wrb->frag_pa_lo);
848 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000849 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500850 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000851 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500852 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000853 }
854}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530856/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530857static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530859 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530861 queue_head_inc(&txo->q);
862 return head;
863}
864
/* Set up the WRB header for xmit: fill the hdr wrb that was reserved at
 * slot @head, remember the skb for completion-time freeing, and account
 * the packet's wrbs as pending (queued but not yet doorbelled).
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	/* NB: despite the name, this counts ALL wrbs of the pkt including
	 * the hdr wrb (see skb_wrb_cnt())
	 */
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* convert the hdr to little-endian in place for the h/w */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* the slot must be free; the skb is freed on tx completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700885
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530886/* Setup a WRB fragment (buffer descriptor) for xmit */
887static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
888 int len)
889{
890 struct be_eth_wrb *wrb;
891 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700892
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530893 wrb = queue_head_node(txq);
894 wrb_fill(wrb, busaddr, len);
895 queue_head_inc(txq);
896}
897
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer index to this packet's hdr wrb slot so the
	 * frag wrbs can be walked from there
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* 'copied' is the number of mapped bytes still to be unmapped;
	 * walking the frag wrbs advances the producer index again
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first frag can be a dma_map_single() mapping */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index moved by the walk above */
	txq->head = head;
}
925
926/* Enqueue the given packet for transmit. This routine allocates WRBs for the
927 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
928 * of WRBs used up by the packet.
929 */
930static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
931 struct sk_buff *skb,
932 struct be_wrb_params *wrb_params)
933{
934 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
935 struct device *dev = &adapter->pdev->dev;
936 struct be_queue_info *txq = &txo->q;
937 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530938 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530939 dma_addr_t busaddr;
940 int len;
941
942 head = be_tx_get_wrb_hdr(txo);
943
944 if (skb->len > skb->data_len) {
945 len = skb_headlen(skb);
946
947 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
948 if (dma_mapping_error(dev, busaddr))
949 goto dma_err;
950 map_single = true;
951 be_tx_setup_wrb_frag(txo, busaddr, len);
952 copied += len;
953 }
954
955 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
956 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
957 len = skb_frag_size(frag);
958
959 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
960 if (dma_mapping_error(dev, busaddr))
961 goto dma_err;
962 be_tx_setup_wrb_frag(txo, busaddr, len);
963 copied += len;
964 }
965
966 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
967
968 be_tx_stats_update(txo, skb);
969 return wrb_cnt;
970
971dma_err:
972 adapter->drv_stats.dma_map_errors++;
973 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000974 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700975}
976
/* Non-zero once the QnQ async event flag has been set (elsewhere in the
 * driver); gates the pvid-based VLAN tx workarounds below.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
981
Somnath Kotur93040ae2012-06-26 22:32:10 +0000982static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000983 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530984 struct be_wrb_params
985 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000986{
987 u16 vlan_tag = 0;
988
989 skb = skb_share_check(skb, GFP_ATOMIC);
990 if (unlikely(!skb))
991 return skb;
992
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100993 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000994 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530995
996 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
997 if (!vlan_tag)
998 vlan_tag = adapter->pvid;
999 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1000 * skip VLAN insertion
1001 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301002 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301003 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001004
1005 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001006 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1007 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001008 if (unlikely(!skb))
1009 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001010 skb->vlan_tci = 0;
1011 }
1012
1013 /* Insert the outer VLAN, if any */
1014 if (adapter->qnq_vid) {
1015 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001016 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1017 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001018 if (unlikely(!skb))
1019 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301020 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001021 }
1022
Somnath Kotur93040ae2012-06-26 22:32:10 +00001023 return skb;
1024}
1025
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001026static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1027{
1028 struct ethhdr *eh = (struct ethhdr *)skb->data;
1029 u16 offset = ETH_HLEN;
1030
1031 if (eh->h_proto == htons(ETH_P_IPV6)) {
1032 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1033
1034 offset += sizeof(struct ipv6hdr);
1035 if (ip6h->nexthdr != NEXTHDR_TCP &&
1036 ip6h->nexthdr != NEXTHDR_UDP) {
1037 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301038 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001039
1040 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1041 if (ehdr->hdrlen == 0xff)
1042 return true;
1043 }
1044 }
1045 return false;
1046}
1047
/* Non-zero if a vlan tag would be inserted for this pkt: either the stack
 * supplied one, or pvid/QnQ (stacked vlan) tagging is configured.
 */
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
1052
/* Non-zero on BE3 for the ipv6 ext-hdr pkts that may stall the ASIC when
 * HW VLAN tagging is requested (see the workarounds in the callers).
 */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
1057
/* Applies the BEx/Lancer HW-bug workarounds to the skb before xmit.
 * May replace the skb (vlan insertion) or drop it. Returns the skb to
 * transmit, or NULL if the pkt was dropped/lost to an alloc failure.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the pad so the pkt ends exactly at IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* err path does not free: be_insert_vlan_in_pkt() already freed
	 * the skb when it returned NULL
	 */
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1126
/* Top-level pre-xmit fixups: pad runt pkts, apply chip-specific
 * workarounds, and bound the pkt length to what the HW can handle.
 * Returns the skb to transmit or NULL if it was consumed/dropped.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		/* NOTE(review): returning NULL without freeing relies on
		 * skb_put_padto() freeing the skb on failure - confirm
		 */
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 * NOTE(review): a pskb_trim() failure only WARNs; the over-long
	 * skb is still returned to the caller - verify this is intended.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1158
/* Notify the h/w (ring the TX doorbell) of all wrbs queued so far */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* fold the dummy wrb into the last request's num_wrb field
		 * in the (already little-endian) hdr wrb
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1182
/* OS2BMC related */

/* Well-known UDP destination ports the BMC may want copies of */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

/* NOTE(review): the mc/bc "allowed" macros pass when the corresponding
 * filter bit in bmc_filt_mask is CLEAR, while the protocol-specific
 * "filt_enabled" macros pass when the bit is SET - presumably
 * intentional; verify against the f/w's bmc_filt_mask semantics.
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	is_multicast_ether_addr(eh->h_dest) &&	\
	!is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* NOTE(review): unused in the visible portion of this file and uses the
 * legacy compare_ether_addr() helper - confirm before relying on it
 */
#define is_broadcast_packet(eh, adapter)	\
		(is_multicast_ether_addr(eh->h_dest) &&	\
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1236
/* Decides whether a copy of this tx pkt must also be routed to the BMC
 * (OS2BMC pass-through), based on pkt type and the BMC filter mask.
 * May replace *skb because a vlan tag, if any, must be inlined in the
 * pkt data before it goes to the BMC.
 * NOTE(review): be_insert_vlan_in_pkt() can return NULL on alloc
 * failure, so *skb may be NULL on a true return - callers must cope.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* only mc/bc frames are candidates for the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* ipv6 neighbour/router advertisements, per the filter mask */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS pkts, keyed on UDP dest port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1306
/* ndo_start_xmit handler: applies pre-xmit workarounds, enqueues the pkt
 * on the TX queue selected by the stack and rings the doorbell (unless
 * more pkts are coming, per xmit_more). Always returns NETDEV_TX_OK.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* flush (ring the doorbell) unless the stack batched more pkts */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 * NOTE(review): on 2nd-enqueue failure the skb is not freed here;
	 * the first enqueue owns it and frees it on tx completion. On
	 * success, skb_get() takes the extra reference for the 2nd copy.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	/* stop the subqueue while there is no room for another max-frag skb */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1357
1358static int be_change_mtu(struct net_device *netdev, int new_mtu)
1359{
1360 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301361 struct device *dev = &adapter->pdev->dev;
1362
1363 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1364 dev_info(dev, "MTU must be between %d and %d bytes\n",
1365 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 return -EINVAL;
1367 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301368
1369 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301370 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 netdev->mtu = new_mtu;
1372 return 0;
1373}
1374
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001375static inline bool be_in_all_promisc(struct be_adapter *adapter)
1376{
1377 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1378 BE_IF_FLAGS_ALL_PROMISCUOUS;
1379}
1380
1381static int be_set_vlan_promisc(struct be_adapter *adapter)
1382{
1383 struct device *dev = &adapter->pdev->dev;
1384 int status;
1385
1386 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1387 return 0;
1388
1389 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1390 if (!status) {
1391 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1392 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1393 } else {
1394 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1395 }
1396 return status;
1397}
1398
1399static int be_clear_vlan_promisc(struct be_adapter *adapter)
1400{
1401 struct device *dev = &adapter->pdev->dev;
1402 int status;
1403
1404 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1405 if (!status) {
1406 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1407 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1408 }
1409 return status;
1410}
1411
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Leave VLAN-promisc before programming an explicit filter table */
	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}
1450
/* ndo_vlan_rx_add_vid handler: record the VID in the SW bitmap and push
 * the updated VLAN table to HW.  On FW failure the SW state (bitmap and
 * vlans_added counter) is rolled back so SW and HW stay consistent.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* Already programmed; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* Undo the SW bookkeeping done above */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1474
Patrick McHardy80d5c362013-04-19 02:04:28 +00001475static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476{
1477 struct be_adapter *adapter = netdev_priv(netdev);
1478
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001479 /* Packets with VID 0 are always received by Lancer by default */
1480 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301481 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001482
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301483 if (!test_bit(vid, adapter->vids))
1484 return 0;
1485
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301486 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301487 adapter->vlans_added--;
1488
1489 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490}
1491
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001492static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301493{
Sathya Perlaac34b742015-02-06 08:18:40 -05001494 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001495 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1496}
1497
1498static void be_set_all_promisc(struct be_adapter *adapter)
1499{
1500 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1501 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1502}
1503
1504static void be_set_mc_promisc(struct be_adapter *adapter)
1505{
1506 int status;
1507
1508 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1509 return;
1510
1511 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1512 if (!status)
1513 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1514}
1515
1516static void be_set_mc_list(struct be_adapter *adapter)
1517{
1518 int status;
1519
1520 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1521 if (!status)
1522 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1523 else
1524 be_set_mc_promisc(adapter);
1525}
1526
/* Re-program the HW unicast MAC filter table from the netdev's UC list.
 * Existing secondary entries (pmac slots 1..uc_macs) are deleted first,
 * then the current list is re-added.  If the list exceeds HW capacity,
 * fall back to full promiscuous mode instead.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Delete old entries; uc_macs is decremented as each slot is freed */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* uc_macs is pre-incremented so slot 0 stays reserved */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1547
1548static void be_clear_uc_list(struct be_adapter *adapter)
1549{
1550 int i;
1551
1552 for (i = 1; i < (adapter->uc_macs + 1); i++)
1553 be_cmd_pmac_del(adapter, adapter->if_handle,
1554 adapter->pmac_id[i], 0);
1555 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301556}
1557
/* ndo_set_rx_mode handler: translate the netdev's flags and address
 * lists into the adapter's RX filter state.  Order matters: full
 * promiscuous overrides everything; leaving it requires re-programming
 * the VLAN table that promiscuous mode bypassed.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-sync the unicast filter table only when its size changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1586
/* ndo_set_vf_mac handler: program a new MAC for the given VF.
 * BEx chips require a delete+add of the pmac entry; newer chips have a
 * direct set-MAC command.  The cached vf_cfg->mac_addr is updated only
 * after the FW accepts the change.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1626
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001627static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301628 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001629{
1630 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001631 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001632
Sathya Perla11ac75e2011-12-13 00:58:50 +00001633 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001634 return -EPERM;
1635
Sathya Perla11ac75e2011-12-13 00:58:50 +00001636 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001637 return -EINVAL;
1638
1639 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001640 vi->max_tx_rate = vf_cfg->tx_rate;
1641 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001642 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1643 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001644 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301645 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001646 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001647
1648 return 0;
1649}
1650
/* Enable Transparent VLAN Tagging (TVT) on a VF: program the HSW config,
 * wipe any guest-programmed VLAN filters, and revoke the VF's filter-
 * management privilege so the guest cannot re-add them.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	/* Best-effort steps above do not fail the overall operation */
	return 0;
}
1679
/* Disable Transparent VLAN Tagging on a VF and hand VLAN-filter
 * management back to the guest by restoring the FILTMGMT privilege.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1706
Sathya Perla748b5392014-05-09 13:29:13 +05301707static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001708{
1709 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001710 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001711 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001712
Sathya Perla11ac75e2011-12-13 00:58:50 +00001713 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001714 return -EPERM;
1715
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001716 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001717 return -EINVAL;
1718
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001719 if (vlan || qos) {
1720 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001721 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001722 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001723 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001724 }
1725
Kalesh APabccf232014-07-17 16:20:24 +05301726 if (status) {
1727 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001728 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1729 status);
Kalesh APabccf232014-07-17 16:20:24 +05301730 return be_cmd_status(status);
1731 }
1732
1733 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301734 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001735}
1736
/* ndo_set_vf_rate handler: cap a VF's TX rate.  min_tx_rate is not
 * supported.  max_tx_rate == 0 removes the cap.  A non-zero rate is
 * validated against the current link speed (and, on Skyhawk, must be a
 * whole percentage of it) before being programmed via QOS config.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Minimum-rate guarantees are not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 means "no limit"; skip all link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301798
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301799static int be_set_vf_link_state(struct net_device *netdev, int vf,
1800 int link_state)
1801{
1802 struct be_adapter *adapter = netdev_priv(netdev);
1803 int status;
1804
1805 if (!sriov_enabled(adapter))
1806 return -EPERM;
1807
1808 if (vf >= adapter->num_vfs)
1809 return -EINVAL;
1810
1811 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301812 if (status) {
1813 dev_err(&adapter->pdev->dev,
1814 "Link state change on VF %d failed: %#x\n", vf, status);
1815 return be_cmd_status(status);
1816 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301817
Kalesh APabccf232014-07-17 16:20:24 +05301818 adapter->vf_cfg[vf].plink_tracking = link_state;
1819
1820 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301821}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001822
Kalesh APe7bcbd72015-05-06 05:30:32 -04001823static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1824{
1825 struct be_adapter *adapter = netdev_priv(netdev);
1826 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1827 u8 spoofchk;
1828 int status;
1829
1830 if (!sriov_enabled(adapter))
1831 return -EPERM;
1832
1833 if (vf >= adapter->num_vfs)
1834 return -EINVAL;
1835
1836 if (BEx_chip(adapter))
1837 return -EOPNOTSUPP;
1838
1839 if (enable == vf_cfg->spoofchk)
1840 return 0;
1841
1842 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1843
1844 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1845 0, spoofchk);
1846 if (status) {
1847 dev_err(&adapter->pdev->dev,
1848 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1849 return be_cmd_status(status);
1850 }
1851
1852 vf_cfg->spoofchk = enable;
1853 return 0;
1854}
1855
Sathya Perla2632baf2013-10-01 16:00:00 +05301856static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1857 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858{
Sathya Perla2632baf2013-10-01 16:00:00 +05301859 aic->rx_pkts_prev = rx_pkts;
1860 aic->tx_reqs_prev = tx_pkts;
1861 aic->jiffies = now;
1862}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001863
/* Compute a new event-queue delay (EQD) for adaptive interrupt
 * coalescing, based on the combined RX+TX packet rate on the queues
 * attached to this EQ since the last sample.  Returns the static
 * ethtool-configured delay when AIC is disabled.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* u64_stats retry loops: re-read if a writer raced with us */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Too soon to measure a rate; keep the previous delay */
	if (delta == 0)
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	/* Scale packets-per-second into a delay value, clamped to the
	 * ethtool-configured min/max; rates below ~30kpps get no delay
	 */
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1924
1925/* For Skyhawk-R only */
1926static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1927{
1928 struct be_adapter *adapter = eqo->adapter;
1929 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1930 ulong now = jiffies;
1931 int eqd;
1932 u32 mult_enc;
1933
1934 if (!aic->enable)
1935 return 0;
1936
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05301937 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001938 eqd = aic->prev_eqd;
1939 else
1940 eqd = be_get_new_eqd(eqo);
1941
1942 if (eqd > 100)
1943 mult_enc = R2I_DLY_ENC_1;
1944 else if (eqd > 60)
1945 mult_enc = R2I_DLY_ENC_2;
1946 else if (eqd > 20)
1947 mult_enc = R2I_DLY_ENC_3;
1948 else
1949 mult_enc = R2I_DLY_ENC_0;
1950
1951 aic->prev_eqd = eqd;
1952
1953 return mult_enc;
1954}
1955
/* Recompute the EQ delay for every event queue and push all changed
 * values to FW in a single modify-EQD command.  force_update programs
 * every queue even if its delay is unchanged.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* FW takes the delay as a multiplier, ~65% of eqd */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	/* Batch all updates into one FW command */
	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1977
/* Account one RX completion into the per-queue stats, inside the
 * u64_stats write-side section so 64-bit readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1995
Sathya Perla2e588f82011-03-11 02:49:26 +00001996static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001997{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001998 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301999 * Also ignore ipcksm for ipv6 pkts
2000 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002001 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302002 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002003}
2004
/* Pop the page-info entry at the RX queue tail and make its data
 * CPU-visible: a full dma_unmap_page() on the fragment that ends a
 * mapped page, or just a dma_sync for intermediate fragments (the page
 * stays mapped for the remaining fragments).  Advances the queue tail.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2030
2031/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002032static void be_rx_compl_discard(struct be_rx_obj *rxo,
2033 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002034{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002035 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002036 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002037
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002038 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302039 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002040 put_page(page_info->page);
2041 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002042 }
2043}
2044
/*
 * skb_fill_rx_data() builds a complete skb for the ether frame indicated
 * by the rx completion @rxcp, consuming rxcp->num_rcvd frags from rxo's
 * queue via get_rx_page_info().
 *
 * The first frag is special-cased: a tiny packet (<= BE_HDR_LEN) is copied
 * entirely into the skb linear area and its page released; otherwise only
 * the ethernet header is copied and the remainder is attached as a page
 * frag. Subsequent frags that share a physical page are coalesced into a
 * single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;	/* i: rcvd-frag index; j: skb frag slot being filled */
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the ethernet header into the linear area */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Page ref is now owned by the skb (or already dropped above) */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag packet: nothing more to gather */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2119
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, attach the received frags, fill in checksum/RSS/vlan
 * metadata from the completion and hand the skb to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and recycle the frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum result only if rx csum offload is enabled */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2155
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received frags to the napi skb, coalescing frags that share
 * a physical page, then feed the skb to GRO.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb: drop the packet and recycle its frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16: starting at -1 (0xffff) makes the first j++ land on
	 * skb frag slot 0
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2212
/* Decode a v1 (BE3-native) HW rx completion @compl into the driver's
 * software rx-completion struct @rxcp.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002235
/* Decode a v0 (legacy) HW rx completion @compl into the driver's software
 * rx-completion struct @rxcp. Unlike v1, the v0 format carries an ip_frag
 * bit and no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are extracted only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2257
/* Return the next valid rx completion from rxo's CQ, parsed into
 * rxo->rxcp, or NULL if none is pending. Consumes the CQ entry and
 * clears its valid bit so it is not seen again.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the transparent (pvid) tag unless the vid was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2302
Eric Dumazet1829b082011-03-01 05:48:12 +00002303static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002304{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002305 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002306
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002308 gfp |= __GFP_COMP;
2309 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310}
2311
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. Posts at most @frags_needed frags, stopping early
 * if the RXQ slot is already occupied or allocation/mapping fails; the
 * doorbell is rung in chunks of at most MAX_NUM_POST_ERX_DB.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and DMA-map it once */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page;
			 * each frag holds its own page reference
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_frag carries the page-wide unmap address */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in bounded chunks */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2394
/* Return the next valid tx completion from txo's CQ, parsed into
 * txo->txcp, or NULL if none is pending. Consumes the CQ entry and
 * clears its valid bit.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Reset the valid bit so this entry is not processed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2415
/* Walk txo's queue from its tail up to and including @last_index,
 * unmapping each wrb and freeing the skbs that were transmitted.
 * A non-NULL sent_skbs[] slot marks the header wrb of a request.
 * Returns the number of wrbs processed (caller adjusts txq->used).
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Unmap the header only for the first frag wrb of a req */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2450
/* Return the number of events in the event queue, consuming each one
 * (its evt word is zeroed) as it is counted.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read the entry only after its evt word was seen non-zero */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2470
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002471/* Leaves the EQ is disarmed state */
2472static void be_eq_clean(struct be_eq_obj *eqo)
2473{
2474 int num = events_get(eqo);
2475
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002476 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002477}
2478
Kalesh AP99b44302015-08-05 03:27:49 -04002479/* Free posted rx buffers that were not used */
2480static void be_rxq_clean(struct be_rx_obj *rxo)
2481{
2482 struct be_queue_info *rxq = &rxo->q;
2483 struct be_rx_page_info *page_info;
2484
2485 while (atomic_read(&rxq->used) > 0) {
2486 page_info = get_rx_page_info(rxo);
2487 put_page(page_info->page);
2488 memset(page_info, 0, sizeof(*page_info));
2489 }
2490 BUG_ON(atomic_read(&rxq->used));
2491 rxq->tail = 0;
2492 rxq->head = 0;
2493}
2494
/* Drain rxo's completion queue during queue teardown, discarding every
 * packet, and leave the CQ disarmed. Waits (up to ~50ms) for the HW flush
 * completion except on Lancer chips, which do not send one.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50 x 1ms or on a HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* Zero num_rcvd identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2534
/* Drain all TX queues during teardown: first reap completions the HW
 * still reports, then free any wrbs that were enqueued but never
 * notified to the HW and rewind the queue indices accordingly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made: restart silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2601
/* Tear down all event queues: drain and destroy each created EQ in HW,
 * remove its napi context and CPU affinity mask, then free the queue
 * memory. Safe to call for EQs whose creation did not complete.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* Queue memory is freed even if HW creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2618
/* Create the event queues: size num_evt_qs from the available irqs and
 * the configured rx/tx irq counts, then for each EQ allocate queue
 * memory, create it in HW, set up its CPU affinity mask and register
 * its napi context. Returns 0 on success or a negative errno; on
 * failure, already-created EQs are left for the caller to destroy
 * (see be_evt_queues_destroy).
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs across CPUs local to the device's NUMA node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2659
Sathya Perla5fb379e2009-06-18 00:02:59 +00002660static void be_mcc_queues_destroy(struct be_adapter *adapter)
2661{
2662 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002663
Sathya Perla8788fdc2009-07-27 22:52:03 +00002664 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002665 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002666 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002667 be_queue_free(adapter, q);
2668
Sathya Perla8788fdc2009-07-27 22:52:03 +00002669 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002670 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002671 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002672 be_queue_free(adapter, q);
2673}
2674
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Creates the MCC (mailbox command channel) completion queue and WRB
 * queue. The goto chain unwinds in strict reverse order of creation.
 * Returns 0 on success, -1 on any failure (callers only test non-zero).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2707
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002708static void be_tx_queues_destroy(struct be_adapter *adapter)
2709{
2710 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002711 struct be_tx_obj *txo;
2712 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002713
Sathya Perla3c8def92011-06-12 20:01:58 +00002714 for_all_tx_queues(adapter, txo, i) {
2715 q = &txo->q;
2716 if (q->created)
2717 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2718 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002719
Sathya Perla3c8def92011-06-12 20:01:58 +00002720 q = &txo->cq;
2721 if (q->created)
2722 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2723 be_queue_free(adapter, q);
2724 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002725}
2726
Sathya Perla77071332013-08-27 16:57:34 +05302727static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002728{
Sathya Perla73f394e2015-03-26 03:05:09 -04002729 struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002730 struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002731 struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302732 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002733
Sathya Perlae2617682016-06-22 08:54:54 -04002734 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
Sathya Perladafc0fe2011-10-24 02:45:02 +00002735
Sathya Perla3c8def92011-06-12 20:01:58 +00002736 for_all_tx_queues(adapter, txo, i) {
2737 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002738 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2739 sizeof(struct be_eth_tx_compl));
2740 if (status)
2741 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002742
John Stultz827da442013-10-07 15:51:58 -07002743 u64_stats_init(&txo->stats.sync);
2744 u64_stats_init(&txo->stats.sync_compl);
2745
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002746 /* If num_evt_qs is less than num_tx_qs, then more than
2747 * one txq share an eq
2748 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002749 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2750 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002751 if (status)
2752 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002754 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2755 sizeof(struct be_eth_wrb));
2756 if (status)
2757 return status;
2758
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002759 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002760 if (status)
2761 return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002762
2763 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2764 eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002765 }
2766
Sathya Perlad3791422012-09-28 04:39:44 +00002767 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2768 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002769 return 0;
2770}
2771
2772static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002773{
2774 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002775 struct be_rx_obj *rxo;
2776 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777
Sathya Perla3abcded2010-10-03 22:12:27 -07002778 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002779 q = &rxo->cq;
2780 if (q->created)
2781 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2782 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002783 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002784}
2785
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002786static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002787{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002788 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002789 struct be_rx_obj *rxo;
2790 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002791
Sathya Perlae2617682016-06-22 08:54:54 -04002792 adapter->num_rss_qs =
2793 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
Sathya Perla92bf14a2013-08-27 16:57:32 +05302794
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002795 /* We'll use RSS only if atleast 2 RSS rings are supported. */
Sathya Perlae2617682016-06-22 08:54:54 -04002796 if (adapter->num_rss_qs < 2)
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002797 adapter->num_rss_qs = 0;
2798
2799 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2800
2801 /* When the interface is not capable of RSS rings (and there is no
2802 * need to create a default RXQ) we'll still need one RXQ
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002803 */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002804 if (adapter->num_rx_qs == 0)
2805 adapter->num_rx_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302806
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002807 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002808 for_all_rx_queues(adapter, rxo, i) {
2809 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002810 cq = &rxo->cq;
2811 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302812 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002813 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002814 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002815
John Stultz827da442013-10-07 15:51:58 -07002816 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002817 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2818 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002819 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002820 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002821 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002822
Sathya Perlad3791422012-09-28 04:39:44 +00002823 dev_info(&adapter->pdev->dev,
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002824 "created %d RX queue(s)\n", adapter->num_rx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002825 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002826}
2827
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002828static irqreturn_t be_intx(int irq, void *dev)
2829{
Sathya Perlae49cc342012-11-27 19:50:02 +00002830 struct be_eq_obj *eqo = dev;
2831 struct be_adapter *adapter = eqo->adapter;
2832 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002833
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002834 /* IRQ is not expected when NAPI is scheduled as the EQ
2835 * will not be armed.
2836 * But, this can happen on Lancer INTx where it takes
2837 * a while to de-assert INTx or in BE2 where occasionaly
2838 * an interrupt may be raised even when EQ is unarmed.
2839 * If NAPI is already scheduled, then counting & notifying
2840 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00002841 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002842 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002843 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002844 __napi_schedule(&eqo->napi);
2845 if (num_evts)
2846 eqo->spurious_intr = 0;
2847 }
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002848 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00002849
2850 /* Return IRQ_HANDLED only for the the first spurious intr
2851 * after a valid intr to stop the kernel from branding
2852 * this irq as a bad one!
2853 */
2854 if (num_evts || eqo->spurious_intr++ == 0)
2855 return IRQ_HANDLED;
2856 else
2857 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002858}
2859
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002860static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002861{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002862 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002863
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002864 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00002865 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002866 return IRQ_HANDLED;
2867}
2868
Sathya Perla2e588f82011-03-11 02:49:26 +00002869static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002870{
Somnath Koture38b1702013-05-29 22:55:56 +00002871 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002872}
2873
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002874static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla748b5392014-05-09 13:29:13 +05302875 int budget, int polling)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002876{
Sathya Perla3abcded2010-10-03 22:12:27 -07002877 struct be_adapter *adapter = rxo->adapter;
2878 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002879 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002880 u32 work_done;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302881 u32 frags_consumed = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002882
2883 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002884 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002885 if (!rxcp)
2886 break;
2887
Sathya Perla12004ae2011-08-02 19:57:46 +00002888 /* Is it a flush compl that has no data */
2889 if (unlikely(rxcp->num_rcvd == 0))
2890 goto loop_continue;
2891
2892 /* Discard compl with partial DMA Lancer B0 */
2893 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002894 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002895 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00002896 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00002897
Sathya Perla12004ae2011-08-02 19:57:46 +00002898 /* On BE drop pkts that arrive due to imperfect filtering in
2899 * promiscuous mode on some skews
2900 */
2901 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05302902 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002903 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002904 goto loop_continue;
2905 }
2906
Sathya Perla6384a4d2013-10-25 10:40:16 +05302907 /* Don't do gro when we're busy_polling */
2908 if (do_gro(rxcp) && polling != BUSY_POLLING)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002909 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00002910 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05302911 be_rx_compl_process(rxo, napi, rxcp);
2912
Sathya Perla12004ae2011-08-02 19:57:46 +00002913loop_continue:
Ajit Khapardec30d7262014-09-12 17:39:16 +05302914 frags_consumed += rxcp->num_rcvd;
Sathya Perla2e588f82011-03-11 02:49:26 +00002915 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002916 }
2917
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002918 if (work_done) {
2919 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00002920
Sathya Perla6384a4d2013-10-25 10:40:16 +05302921 /* When an rx-obj gets into post_starved state, just
2922 * let be_worker do the posting.
2923 */
2924 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2925 !rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05302926 be_post_rx_frags(rxo, GFP_ATOMIC,
2927 max_t(u32, MAX_RX_POST,
2928 frags_consumed));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002929 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002930
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002931 return work_done;
2932}
2933
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302934static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302935{
2936 switch (status) {
2937 case BE_TX_COMP_HDR_PARSE_ERR:
2938 tx_stats(txo)->tx_hdr_parse_err++;
2939 break;
2940 case BE_TX_COMP_NDMA_ERR:
2941 tx_stats(txo)->tx_dma_err++;
2942 break;
2943 case BE_TX_COMP_ACL_ERR:
2944 tx_stats(txo)->tx_spoof_check_err++;
2945 break;
2946 }
2947}
2948
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302949static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302950{
2951 switch (status) {
2952 case LANCER_TX_COMP_LSO_ERR:
2953 tx_stats(txo)->tx_tso_err++;
2954 break;
2955 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2956 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2957 tx_stats(txo)->tx_spoof_check_err++;
2958 break;
2959 case LANCER_TX_COMP_QINQ_ERR:
2960 tx_stats(txo)->tx_qinq_err++;
2961 break;
2962 case LANCER_TX_COMP_PARITY_ERR:
2963 tx_stats(txo)->tx_internal_parity_err++;
2964 break;
2965 case LANCER_TX_COMP_DMA_ERR:
2966 tx_stats(txo)->tx_dma_err++;
2967 break;
2968 }
2969}
2970
Sathya Perlac8f64612014-09-02 09:56:55 +05302971static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2972 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002973{
Sathya Perlac8f64612014-09-02 09:56:55 +05302974 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302975 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002976
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302977 while ((txcp = be_tx_compl_get(txo))) {
2978 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05302979 work_done++;
2980
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302981 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05302982 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302983 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302984 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302985 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05302986 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002987 }
2988
2989 if (work_done) {
2990 be_cq_notify(adapter, txo->cq.id, true, work_done);
2991 atomic_sub(num_wrbs, &txo->q.used);
2992
2993 /* As Tx wrbs have been freed up, wake up netdev queue
2994 * if it was stopped due to lack of tx wrbs. */
2995 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302996 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002997 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00002998 }
Sathya Perla3c8def92011-06-12 20:01:58 +00002999
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003000 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3001 tx_stats(txo)->tx_compl += work_done;
3002 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3003 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003004}
Sathya Perla3c8def92011-06-12 20:01:58 +00003005
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003006#ifdef CONFIG_NET_RX_BUSY_POLL
3007static inline bool be_lock_napi(struct be_eq_obj *eqo)
3008{
3009 bool status = true;
3010
3011 spin_lock(&eqo->lock); /* BH is already disabled */
3012 if (eqo->state & BE_EQ_LOCKED) {
3013 WARN_ON(eqo->state & BE_EQ_NAPI);
3014 eqo->state |= BE_EQ_NAPI_YIELD;
3015 status = false;
3016 } else {
3017 eqo->state = BE_EQ_NAPI;
3018 }
3019 spin_unlock(&eqo->lock);
3020 return status;
3021}
3022
3023static inline void be_unlock_napi(struct be_eq_obj *eqo)
3024{
3025 spin_lock(&eqo->lock); /* BH is already disabled */
3026
3027 WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
3028 eqo->state = BE_EQ_IDLE;
3029
3030 spin_unlock(&eqo->lock);
3031}
3032
3033static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3034{
3035 bool status = true;
3036
3037 spin_lock_bh(&eqo->lock);
3038 if (eqo->state & BE_EQ_LOCKED) {
3039 eqo->state |= BE_EQ_POLL_YIELD;
3040 status = false;
3041 } else {
3042 eqo->state |= BE_EQ_POLL;
3043 }
3044 spin_unlock_bh(&eqo->lock);
3045 return status;
3046}
3047
3048static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3049{
3050 spin_lock_bh(&eqo->lock);
3051
3052 WARN_ON(eqo->state & (BE_EQ_NAPI));
3053 eqo->state = BE_EQ_IDLE;
3054
3055 spin_unlock_bh(&eqo->lock);
3056}
3057
3058static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3059{
3060 spin_lock_init(&eqo->lock);
3061 eqo->state = BE_EQ_IDLE;
3062}
3063
3064static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3065{
3066 local_bh_disable();
3067
3068 /* It's enough to just acquire napi lock on the eqo to stop
3069 * be_busy_poll() from processing any queueus.
3070 */
3071 while (!be_lock_napi(eqo))
3072 mdelay(1);
3073
3074 local_bh_enable();
3075}
3076
3077#else /* CONFIG_NET_RX_BUSY_POLL */
3078
3079static inline bool be_lock_napi(struct be_eq_obj *eqo)
3080{
3081 return true;
3082}
3083
3084static inline void be_unlock_napi(struct be_eq_obj *eqo)
3085{
3086}
3087
3088static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
3089{
3090 return false;
3091}
3092
3093static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
3094{
3095}
3096
3097static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
3098{
3099}
3100
3101static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
3102{
3103}
3104#endif /* CONFIG_NET_RX_BUSY_POLL */
3105
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303106int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003107{
3108 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3109 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00003110 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05303111 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303112 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003113 u32 mult_enc = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00003114
Sathya Perla0b545a62012-11-23 00:27:18 +00003115 num_evts = events_get(eqo);
3116
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303117 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3118 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003119
Sathya Perla6384a4d2013-10-25 10:40:16 +05303120 if (be_lock_napi(eqo)) {
3121 /* This loop will iterate twice for EQ0 in which
3122 * completions of the last RXQ (default one) are also processed
3123 * For other EQs the loop iterates only once
3124 */
3125 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3126 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
3127 max_work = max(work, max_work);
3128 }
3129 be_unlock_napi(eqo);
3130 } else {
3131 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08003132 }
3133
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003134 if (is_mcc_eqo(eqo))
3135 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003136
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003137 if (max_work < budget) {
3138 napi_complete(napi);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003139
3140 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3141 * delay via a delay multiplier encoding value
3142 */
3143 if (skyhawk_chip(adapter))
3144 mult_enc = be_get_eq_delay_mult_enc(eqo);
3145
3146 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3147 mult_enc);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003148 } else {
3149 /* As we'll continue in polling mode, count and clear events */
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003150 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00003151 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003152 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003153}
3154
Sathya Perla6384a4d2013-10-25 10:40:16 +05303155#ifdef CONFIG_NET_RX_BUSY_POLL
3156static int be_busy_poll(struct napi_struct *napi)
3157{
3158 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3159 struct be_adapter *adapter = eqo->adapter;
3160 struct be_rx_obj *rxo;
3161 int i, work = 0;
3162
3163 if (!be_lock_busy_poll(eqo))
3164 return LL_FLUSH_BUSY;
3165
3166 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3167 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
3168 if (work)
3169 break;
3170 }
3171
3172 be_unlock_busy_poll(eqo);
3173 return work;
3174}
3175#endif
3176
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003177void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003178{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003179 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3180 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003181 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303182 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003183
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303184 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003185 return;
3186
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003187 if (lancer_chip(adapter)) {
3188 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3189 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303190 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003191 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303192 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003193 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303194 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303195 /* Do not log error messages if its a FW reset */
3196 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3197 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3198 dev_info(dev, "Firmware update in progress\n");
3199 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303200 dev_err(dev, "Error detected in the card\n");
3201 dev_err(dev, "ERR: sliport status 0x%x\n",
3202 sliport_status);
3203 dev_err(dev, "ERR: sliport error1 0x%x\n",
3204 sliport_err1);
3205 dev_err(dev, "ERR: sliport error2 0x%x\n",
3206 sliport_err2);
3207 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003208 }
3209 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003210 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3211 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3212 ue_lo_mask = ioread32(adapter->pcicfg +
3213 PCICFG_UE_STATUS_LOW_MASK);
3214 ue_hi_mask = ioread32(adapter->pcicfg +
3215 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003216
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003217 ue_lo = (ue_lo & ~ue_lo_mask);
3218 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003219
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303220 /* On certain platforms BE hardware can indicate spurious UEs.
3221 * Allow HW to stop working completely in case of a real UE.
3222 * Hence not setting the hw_error for UE detection.
3223 */
3224
3225 if (ue_lo || ue_hi) {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303226 dev_err(dev,
3227 "Unrecoverable Error detected in the adapter");
3228 dev_err(dev, "Please reboot server to recover");
3229 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303230 be_set_error(adapter, BE_ERROR_UE);
3231
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303232 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3233 if (ue_lo & 1)
3234 dev_err(dev, "UE: %s bit set\n",
3235 ue_status_low_desc[i]);
3236 }
3237 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3238 if (ue_hi & 1)
3239 dev_err(dev, "UE: %s bit set\n",
3240 ue_status_hi_desc[i]);
3241 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303242 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003243 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003244}
3245
Sathya Perla8d56ff12009-11-22 22:02:26 +00003246static void be_msix_disable(struct be_adapter *adapter)
3247{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003248 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003249 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003250 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303251 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003252 }
3253}
3254
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003255static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003256{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003257 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003258 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003259 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003260
Sathya Perlace7faf02016-06-22 08:54:53 -04003261 /* If RoCE is supported, program the max number of vectors that
3262 * could be used for NIC and RoCE, else, just program the number
3263 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303264 */
Sathya Perlae2617682016-06-22 08:54:54 -04003265 if (be_roce_supported(adapter)) {
3266 max_roce_eqs =
3267 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3268 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3269 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3270 } else {
3271 num_vec = max(adapter->cfg_num_rx_irqs,
3272 adapter->cfg_num_tx_irqs);
3273 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003274
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003275 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003276 adapter->msix_entries[i].entry = i;
3277
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003278 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3279 MIN_MSIX_VECTORS, num_vec);
3280 if (num_vec < 0)
3281 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003282
Sathya Perla92bf14a2013-08-27 16:57:32 +05303283 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3284 adapter->num_msix_roce_vec = num_vec / 2;
3285 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3286 adapter->num_msix_roce_vec);
3287 }
3288
3289 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3290
3291 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3292 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003293 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003294
3295fail:
3296 dev_warn(dev, "MSIx enable failed\n");
3297
3298 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003299 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003300 return num_vec;
3301 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003302}
3303
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003304static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303305 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003306{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303307 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003308}
3309
3310static int be_msix_register(struct be_adapter *adapter)
3311{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003312 struct net_device *netdev = adapter->netdev;
3313 struct be_eq_obj *eqo;
3314 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003315
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003316 for_all_evt_queues(adapter, eqo, i) {
3317 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3318 vec = be_msix_vec_get(adapter, eqo);
3319 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003320 if (status)
3321 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003322
3323 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003324 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003325
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003326 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003327err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303328 for (i--; i >= 0; i--) {
3329 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003330 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303331 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003332 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303333 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003334 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003335 return status;
3336}
3337
3338static int be_irq_register(struct be_adapter *adapter)
3339{
3340 struct net_device *netdev = adapter->netdev;
3341 int status;
3342
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003343 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003344 status = be_msix_register(adapter);
3345 if (status == 0)
3346 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003347 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003348 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003349 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003350 }
3351
Sathya Perlae49cc342012-11-27 19:50:02 +00003352 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003353 netdev->irq = adapter->pdev->irq;
3354 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003355 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003356 if (status) {
3357 dev_err(&adapter->pdev->dev,
3358 "INTx request IRQ failed - err %d\n", status);
3359 return status;
3360 }
3361done:
3362 adapter->isr_registered = true;
3363 return 0;
3364}
3365
3366static void be_irq_unregister(struct be_adapter *adapter)
3367{
3368 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003369 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003370 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003371
3372 if (!adapter->isr_registered)
3373 return;
3374
3375 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003376 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003377 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003378 goto done;
3379 }
3380
3381 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003382 for_all_evt_queues(adapter, eqo, i) {
3383 vec = be_msix_vec_get(adapter, eqo);
3384 irq_set_affinity_hint(vec, NULL);
3385 free_irq(vec, eqo);
3386 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003387
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003388done:
3389 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003390}
3391
/* Destroy all RX queues and free their memory; finally disable RSS in FW
 * if it was enabled. Called from the close path and from be_open()'s
 * error unwind (via be_close()).
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			/* Destroy in FW first, then drain the completion
			 * queue and reclaim any still-posted RX buffers.
			 */
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		/* Free queue memory even if the queue was never created */
		be_queue_free(adapter, q);
	}

	if (rss->rss_flags) {
		/* Tell FW to stop RSS-hashing before the queues go away */
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3428
/* Undo the filter programming done by be_enable_if_filters() in the open
 * path: delete the primary MAC, flush the unicast list and (on Lancer
 * only) clear the IFACE RX-filter flags.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3455
/* ndo_stop handler: quiesce and tear down the data path in the reverse
 * order of be_open() — filters, NAPI, MCC, TX drain, RX queues, EQs and
 * finally the IRQs. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Make sure no in-flight interrupt handler still references an EQ
	 * before cleaning it
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3500
/* Allocate and create all RX queues in FW, build the RSS indirection
 * table across the RSS queues, program the RSS hash key, and post the
 * initial RX buffers. Returns 0 or a negative/FW error code; on failure
 * the caller (be_open) unwinds via be_close().
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	/* Allocate ring memory for every RX queue first */
	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the non-RSS default RXQ only when needed */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * round-robin across all RSS_INDIR_TABLE_LEN entries
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is not supported on BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		/* Cache the key so be_rx_qs_destroy() can reuse it */
		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}


	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3571
Kalesh APbcc84142015-08-05 03:27:48 -04003572static int be_enable_if_filters(struct be_adapter *adapter)
3573{
3574 int status;
3575
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003576 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003577 if (status)
3578 return status;
3579
3580 /* For BE3 VFs, the PF programs the initial MAC address */
3581 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3582 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3583 adapter->if_handle,
3584 &adapter->pmac_id[0], 0);
3585 if (status)
3586 return status;
3587 }
3588
3589 if (adapter->vlans_added)
3590 be_vid_config(adapter);
3591
3592 be_set_rx_mode(adapter->netdev);
3593
3594 return 0;
3595}
3596
/* ndo_open handler: create RX queues, program filters, register IRQs,
 * arm all CQs/EQs, enable NAPI and MCC processing, then start the TX
 * queues. On any failure the partial bring-up is undone via be_close()
 * and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm all completion queues so they start raising events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report link state immediately instead of waiting for an async
	 * link event
	 */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	/* Re-learn VXLAN ports for tunnel offloads (Skyhawk only) */
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3646
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003647static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3648{
3649 u32 addr;
3650
3651 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3652
3653 mac[5] = (u8)(addr & 0xFF);
3654 mac[4] = (u8)((addr >> 8) & 0xFF);
3655 mac[3] = (u8)((addr >> 16) & 0xFF);
3656 /* Use the OUI from the current MAC address */
3657 memcpy(mac, adapter->netdev->dev_addr, 3);
3658}
3659
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last programming attempt; failures are
 * logged per-VF but do not stop the loop.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs the MAC via a pmac-add on the VF's IFACE;
		 * later chips use the set_mac command instead.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): the increment wraps within the last byte;
		 * presumably num_vfs is always small enough that addresses
		 * never collide — confirm against the VF count limits.
		 */
		mac[5] += 1;
	}
	return status;
}
3695
Sathya Perla4c876612013-02-03 20:30:11 +00003696static int be_vfs_mac_query(struct be_adapter *adapter)
3697{
3698 int status, vf;
3699 u8 mac[ETH_ALEN];
3700 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003701
3702 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303703 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3704 mac, vf_cfg->if_handle,
3705 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003706 if (status)
3707 return status;
3708 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3709 }
3710 return 0;
3711}
3712
/* Tear down SR-IOV state: disable SR-IOV, delete each VF's MAC and
 * IFACE, restore the BE3 hardware-switch mode and free the per-VF
 * config array. If VFs are still assigned to guests, only the software
 * state is released.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Mirror of the MAC programming done in
		 * be_vf_eth_addr_config(): pmac-del on BEx, set_mac(NULL)
		 * on later chips.
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	/* Undo the VEB mode enabled by be_vf_setup() on BE3 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3746
/* Destroy all driver queues: MCC, RX completion queues, TX queues and
 * event queues, in that order.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3754
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303755static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003756{
Sathya Perla191eb752012-02-23 18:50:13 +00003757 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3758 cancel_delayed_work_sync(&adapter->work);
3759 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3760 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303761}
3762
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003763static void be_cancel_err_detection(struct be_adapter *adapter)
3764{
3765 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3766 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3767 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3768 }
3769}
3770
/* Turn off VXLAN offloads: revert the IFACE from tunnel mode, clear the
 * FW's VXLAN port, reset driver state and strip the tunnel features
 * advertised on the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Withdraw the tunnel-offload features from the stack */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
3789
/* Compute the per-VF resource template (@vft_res) used when configuring
 * SR-IOV: queue counts, IFACE capability flags and the shareable limits
 * (MACs, VLANs, IFACEs, MCCQs), dividing the PF pool's resources evenly
 * among the PF and @num_vfs VFs. Fields are only set when FW reports
 * them as modifiable via GET_PROFILE_CONFIG.
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and it's VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than it's PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	/* Never grant VLAN-promiscuous capability to VFs when it is
	 * modifiable
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and it's VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
3865
/* Undo be_setup(): stop the worker, clear SR-IOV, re-balance the FW
 * resource distribution, drop VXLAN offloads, destroy the IFACE and all
 * queues, and disable MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);
	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	/* Flag guards be_close() against running on a cleared adapter */
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3901
/* Create an IFACE in FW for each VF (issued by the PF as a proxy).
 * Capability flags come from the FW profile when one exists, with VLAN
 * promiscuous always masked off; otherwise the basic VF flag set is
 * used. Returns 0 or the first if_create failure.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3937
Sathya Perla39f1d942012-05-08 19:41:24 +00003938static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003939{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003940 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003941 int vf;
3942
Sathya Perla39f1d942012-05-08 19:41:24 +00003943 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3944 GFP_KERNEL);
3945 if (!adapter->vf_cfg)
3946 return -ENOMEM;
3947
Sathya Perla11ac75e2011-12-13 00:58:50 +00003948 for_all_vfs(adapter, vf_cfg, vf) {
3949 vf_cfg->if_handle = -1;
3950 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003951 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003952 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003953}
3954
/* Bring up SR-IOV: allocate per-VF state, create (or re-discover) each
 * VF's IFACE and MAC, grant filter-management privileges, read back
 * spoof-check state, enable the VFs and finally turn on SR-IOV at the
 * PCI level (and VEB mode on BE3). When VFs already exist (old_vfs),
 * existing IFACEs/MACs are queried instead of recreated. On any error
 * the whole setup is unwound via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: recover their IFACE ids and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the VF's current spoof-check setting */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4047
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304048/* Converting function_mode bits on BE3 to SH mc_type enums */
4049
4050static u8 be_convert_mc_type(u32 function_mode)
4051{
Suresh Reddy66064db2014-06-23 16:41:29 +05304052 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304053 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304054 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304055 return FLEX10;
4056 else if (function_mode & VNIC_MODE)
4057 return vNIC2;
4058 else if (function_mode & UMC_ENABLED)
4059 return UMC;
4060 else
4061 return MC_NONE;
4062}
4063
/* On BE2/BE3 FW does not suggest the supported limits, so fill in *res
 * with driver-imposed defaults derived from chip type, SR-IOV state and
 * multi-channel mode. Side effect: caches the multi-channel type in
 * adapter->mc_type.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable, non-SRIOV physical function */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BE2/BE3 FW has no default-RSS-queue support */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4135
Sathya Perla30128032011-11-10 19:17:57 +00004136static void be_setup_init(struct be_adapter *adapter)
4137{
4138 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004139 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004140 adapter->if_handle = -1;
4141 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004142 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304143 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004144 if (be_physfn(adapter))
4145 adapter->cmd_privileges = MAX_PRIVILEGES;
4146 else
4147 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004148}
4149
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * no: of VFs for which RSS can be enabled.
 */
void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	/* Query port-wide limits from the saved (factory) profile */
	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Tables not reserved for NIC PFs are divisible among VF pools */
	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 * NOTE(review): assumes FW reports port_res.max_vfs != 0 here;
	 * verify the cmd cannot return 0 on a SR-IOV capable port.
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4174
/* Read the SR-IOV resource limits (PF-pool) from FW into adapter->pool_res
 * and reconcile them with any VFs that are already enabled in PCI config
 * space from a previous driver load. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	/* On Skyhawk, advertise how many VFs can have RSS enabled given the
	 * per-port RSS-table limit (see be_calculate_pf_pool_rss_tables()).
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4213
/* Fetch the SR-IOV configuration and, on Skyhawk with no pre-existing VFs,
 * redistribute the PF-pool resources between the PF and prospective VFs via
 * SET_SRIOV_CONFIG. Failure to optimize is logged but not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the max VF count to the PCI core (sysfs sriov_totalvfs) */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4239
/* Populate adapter->res with per-function resource limits (from FW where
 * supported, from driver defaults on BE2/BE3), reserve EQs for RoCE, and
 * derive the initial RX/TX IRQ counts. Returns 0 or a FW-cmd error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4289
/* Query static adapter configuration from FW: controller attributes, FW
 * config, FAT dump length (PF only, non-Lancer), FW log level (BEx),
 * ACPI WoL capability (and arm PCI wake accordingly), port name and the
 * active profile id (PF only). Returns 0 or the first fatal cmd error.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		/* Mirror the FW log level into the netif message mask */
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4327
Sathya Perla95046b92013-07-23 15:25:02 +05304328static int be_mac_setup(struct be_adapter *adapter)
4329{
4330 u8 mac[ETH_ALEN];
4331 int status;
4332
4333 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4334 status = be_cmd_get_perm_mac(adapter, mac);
4335 if (status)
4336 return status;
4337
4338 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4339 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304340 }
4341
Sathya Perla95046b92013-07-23 15:25:02 +05304342 return 0;
4343}
4344
/* Schedule the periodic worker (adapter->work) to run after 1000 ms and
 * record that fact in adapter->flags so it can be cancelled later.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4350
/* Schedule the HW error-detection task to run after @delay msecs and mark
 * it as pending via BE_FLAGS_ERR_DETECTION_SCHEDULED.
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4357
/* Create all queues (EQs, TXQs, RX CQs, MCC queues) and publish the real
 * queue counts to the stack. be_setup() invokes this under rtnl_lock as
 * required by netif_set_real_num_rx/tx_queues(). On any failure a single
 * error is logged; the caller is responsible for cleanup.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4392
Ajit Khaparde62219062016-02-10 22:45:53 +05304393static int be_if_create(struct be_adapter *adapter)
4394{
4395 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4396 u32 cap_flags = be_if_cap_flags(adapter);
4397 int status;
4398
Sathya Perlae2617682016-06-22 08:54:54 -04004399 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304400 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4401
4402 en_flags &= cap_flags;
4403 /* will enable all the needed filter flags in be_open() */
4404 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4405 &adapter->if_handle, 0);
4406
4407 return status;
4408}
4409
/* Tear down and re-create the interface and all its queues, typically after
 * a change in the configured queue counts. Closes the netdev if it is up,
 * rebuilds, and re-opens it. MSI-X is only re-programmed when no vectors
 * are shared with RoCE.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	/* The interface must be re-created so that queue counts take effect */
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4452
/* Parse the leading major number out of a dotted FW version string
 * (e.g. "11.2.1193.23" -> 11). Returns 0 when no number can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4463
Sathya Perlaf962f842015-02-23 04:20:16 -05004464/* If any VFs are already enabled don't FLR the PF */
4465static bool be_reset_required(struct be_adapter *adapter)
4466{
4467 return pci_num_vf(adapter->pdev) ? false : true;
4468}
4469
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip FLR when VFs are already enabled (see be_reset_required()) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4501
/* Top-level adapter bring-up: FW init/reset, config and resource discovery,
 * MSI-X and interface/queue creation, MAC programming, flow control, SR-IOV
 * VF setup when requested, and finally the periodic worker. On failure the
 * "err" path unwinds everything via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;	/* NOTE(review): bare return here bypasses the
				 * "goto err" cleanup used by the other failure
				 * paths below — verify nothing allocated so
				 * far needs unwinding
				 */

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 FW older than 4.0 has known IRQ problems — warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If setting flow control fails, read back what the HW settled on */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4608
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: with normal interrupts unavailable, re-arm each event
 * queue and kick its NAPI context so pending completions get processed.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4622
/* Flash the firmware image named by @fw_file onto the adapter. The interface
 * must be up (FW cmds require an initialized function). Dispatches to the
 * Lancer- or BE-specific download routine and refreshes the cached FW
 * version on success.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	/* release_firmware(NULL) is a no-op, so the early-failure path is safe */
	release_firmware(fw);
	return status;
}
4652
/* ndo_bridge_setlink: switch the embedded bridge between VEB and VEPA
 * forwarding based on the IFLA_BRIDGE_MODE netlink attribute. Only valid
 * with SR-IOV enabled; BE3 supports VEB only.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* First IFLA_BRIDGE_MODE attribute wins; done */
		return status;
	}
err:
	/* NOTE(review): a message with no IFLA_BRIDGE_MODE attribute falls
	 * through to here and logs a failure while returning status == 0 —
	 * verify this is the intended behavior.
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4702
/* ndo_bridge_getlink: report the embedded bridge's forwarding mode
 * (VEB/VEPA) via the default netlink fill helper. Returns 0 without
 * filling anything when no bridge mode applies (non-SR-IOV BE3/Lancer,
 * query failure, or PASSTHRU mode).
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
4733
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004734/* VxLAN offload Notes:
4735 *
4736 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4737 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4738 * is expected to work across all types of IP tunnels once exported. Skyhawk
4739 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304740 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4741 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4742 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004743 *
4744 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4745 * adds more than one port, disable offloads and don't re-enable them again
4746 * until after all the tunnels are removed.
4747 */
/* ndo_udp_tunnel_add handler: program the (single) VxLAN UDP dport into the
 * HW and export tunnel-offload features on the netdev.  See the "VxLAN
 * offload Notes" comment above: Skyhawk supports offloads for exactly one
 * VxLAN dport, so adding a second, different port disables offloads until
 * all tunnel ports are removed again.
 */
static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	__be16 port = ti->port;
	int status;

	/* Only VxLAN tunnels are offloadable; ignore other tunnel types */
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	/* No VxLAN offload support on Lancer/BEx chips or multi-channel */
	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* Same port re-added: just track it as an alias, HW already set up */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		return;
	}

	/* A different port while offloads are active: HW can handle only one
	 * dport, so tear offloads down (count is still bumped so deletions
	 * stay balanced).
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads were already disabled due to multiple ports: only count */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that the HW port is programmed */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4805
/* ndo_udp_tunnel_del handler: mirror of be_add_vxlan_port().  Drops an
 * alias reference first if one exists; otherwise disables HW VxLAN
 * offloads.  vxlan_port_count is decremented for every port this driver
 * previously counted, matching or not.
 */
static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	__be16 port = ti->port;

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	/* Same chips that never enabled offloads in be_add_vxlan_port() */
	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* A non-active port: only balance the counter taken at add time */
	if (adapter->vxlan_port != port)
		goto done;

	/* Active port added more than once: release one alias reference */
	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
Joe Stringer725d5482014-11-13 16:38:13 -08004834
/* ndo_features_check handler: for encapsulated skbs while VxLAN offloads
 * are enabled, strip checksum/GSO features unless the packet really is a
 * UDP-encapsulated VxLAN frame (Skyhawk offloads only VxLAN).
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Non-IP outer header: leave features untouched */
		return features;
	}

	/* Not VxLAN if: outer L4 is not UDP, inner frame is not Ethernet
	 * (Transparent Ethernet Bridging), or the inner MAC header does not
	 * sit exactly one UDP + VxLAN header past the outer transport header.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac9c47142014-03-27 10:46:19 +05304875
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05304876static int be_get_phys_port_id(struct net_device *dev,
4877 struct netdev_phys_item_id *ppid)
4878{
4879 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4880 struct be_adapter *adapter = netdev_priv(dev);
4881 u8 *id;
4882
4883 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4884 return -ENOSPC;
4885
4886 ppid->id[0] = adapter->hba_port_num + 1;
4887 id = &ppid->id[1];
4888 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4889 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4890 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4891
4892 ppid->id_len = id_len;
4893
4894 return 0;
4895}
4896
/* netdev callbacks for the be2net driver; installed on the netdev by
 * be_netdev_init() before register_netdev().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF management hooks (exercised via "ip link set ... vf") */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
	/* VxLAN offload management; see "VxLAN offload Notes" above */
	.ndo_udp_tunnel_add = be_add_vxlan_port,
	.ndo_udp_tunnel_del = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
4927
/* One-time netdev feature/flag/ops setup, done before register_netdev() */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* Advertise RX hashing only if the interface supports RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW supports unicast address filtering */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so the resulting frame (payload + L2 header) fits HW max */
	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4954
/* Detach the netdev, close it if it was up, and destroy all driver
 * resources (be_clear).  Used by error recovery and suspend;
 * be_resume() is the inverse of this routine.
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* rtnl_lock serializes against concurrent ndo_open/ndo_stop */
	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
4967
/* Re-create driver resources (be_setup) and re-open the netdev if it was
 * running; inverse of be_cleanup().  Returns 0 on success or a negative
 * status from be_setup()/be_open().
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	/* rtnl_lock serializes be_open() against other netdev operations */
	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
4989
/* Attempt to recover the adapter after a HW error: wait for FW to become
 * ready, tear everything down and rebuild it via be_resume().
 * Returns 0 on success, -EIO on unsupported chips, or the failing step's
 * status.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	/* Error recovery is supported only Lancer as of now */
	if (!lancer_chip(adapter))
		return -EIO;

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	return 0;
err:
	return status;
}
5015
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005016static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005017{
5018 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005019 container_of(work, struct be_adapter,
5020 be_err_detection_work.work);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305021 struct device *dev = &adapter->pdev->dev;
5022 int recovery_status;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305023 int delay = ERR_DETECTION_DELAY;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005024
5025 be_detect_error(adapter);
5026
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305027 if (be_check_error(adapter, BE_ERROR_HW))
5028 recovery_status = be_err_recover(adapter);
5029 else
5030 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05005031
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305032 if (!recovery_status) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305033 adapter->recovery_retries = 0;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305034 dev_info(dev, "Adapter recovery successful\n");
5035 goto reschedule_task;
5036 } else if (be_virtfn(adapter)) {
5037 /* For VFs, check if PF have allocated resources
5038 * every second.
5039 */
5040 dev_err(dev, "Re-trying adapter recovery\n");
5041 goto reschedule_task;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305042 } else if (adapter->recovery_retries++ <
5043 MAX_ERR_RECOVERY_RETRY_COUNT) {
5044 /* In case of another error during recovery, it takes 30 sec
5045 * for adapter to come out of error. Retry error recovery after
5046 * this time interval.
5047 */
5048 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5049 delay = ERR_RECOVERY_RETRY_DELAY;
5050 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305051 } else {
5052 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005053 }
5054
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305055 return;
5056reschedule_task:
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305057 be_schedule_err_detection(adapter, delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005058}
5059
/* Query SFP transceiver info from FW and log it after a port
 * misconfiguration event.  Clears BE_FLAGS_PHY_MISCONFIGURED
 * unconditionally so be_worker() does not log repeatedly.
 */
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}
5075
/* Periodic (1s) housekeeping work: die-temperature query, MCC completion
 * reaping while the interface is down, stats refresh, RX-queue
 * replenishing, EQ-delay update and deferred SFP logging.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* be_get_temp_freq is a power of 2 (see be_drv_init), as MODULO
	 * requires; temperature is PF-only.
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Refresh stats only if the previous stats command has completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	/* Misconfigured-PHY event seen: log transceiver details once */
	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5124
/* Undo be_map_pci_bars(): unmap CSR, doorbell and - only if it was
 * actually iomapped rather than derived from the doorbell mapping - the
 * PCICFG region.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	/* VFs alias pcicfg into the db mapping; only unmap a real mapping */
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}
5134
/* Return the PCI BAR number holding the doorbell region: BAR 0 on Lancer
 * chips and on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5142
5143static int be_roce_map_pci_bars(struct be_adapter *adapter)
5144{
5145 if (skyhawk_chip(adapter)) {
5146 adapter->roce_db.size = 4096;
5147 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5148 db_bar(adapter));
5149 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5150 db_bar(adapter));
5151 }
5152 return 0;
5153}
5154
/* Map the PCI BARs used by the driver (CSR, doorbell, PCICFG) and record
 * the SLI family / VF status read from config space.  Returns 0 on
 * success or -ENOMEM; on failure any partial mappings are released.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* Identify the SLI family and PF/VF personality from the SLI_INTF
	 * register before deciding which BARs to map.
	 */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (2) exists only on BEx physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	/* Doorbell BAR number depends on chip/function; see db_bar() */
	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			/* VFs reach PCICFG at an offset inside the db BAR;
			 * pcicfg_mapped = false tells unmap not to iounmap it
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5199
5200static void be_drv_cleanup(struct be_adapter *adapter)
5201{
5202 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5203 struct device *dev = &adapter->pdev->dev;
5204
5205 if (mem->va)
5206 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5207
5208 mem = &adapter->rx_filter;
5209 if (mem->va)
5210 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5211
5212 mem = &adapter->stats_cmd;
5213 if (mem->va)
5214 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5215}
5216
/* Allocate and initialize various fields in be_adapter struct.
 * Allocates the coherent DMA buffers (mailbox, rx-filter cmd, stats cmd),
 * initializes locks/completions and the delayed work items, and sets
 * default flow-control state.  Returns 0 or -ENOMEM, unwinding any
 * earlier allocations on failure.  be_drv_cleanup() is the inverse.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 so the mailbox itself can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats command size varies by chip generation / stats version */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	/* Flow control defaults to on in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5287
/* PCI remove callback: quiesce the adapter and release everything acquired
 * in be_probe(), in reverse order of acquisition.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the recovery worker before tearing down its resources */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5317
Arnd Bergmann9a032592015-05-18 23:06:45 +02005318static ssize_t be_hwmon_show_temp(struct device *dev,
5319 struct device_attribute *dev_attr,
5320 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305321{
5322 struct be_adapter *adapter = dev_get_drvdata(dev);
5323
5324 /* Unit: millidegree Celsius */
5325 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5326 return -EIO;
5327 else
5328 return sprintf(buf, "%u\n",
5329 adapter->hwmon_info.be_on_die_temp * 1000);
5330}
5331
/* Read-only hwmon attribute exposing the on-die temperature as
 * temp1_input, serviced by be_hwmon_show_temp().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

/* Generates be_hwmon_groups, passed at hwmon registration in be_probe() */
ATTRIBUTE_GROUPS(be_hwmon);
5341
Sathya Perlad3791422012-09-28 04:39:44 +00005342static char *mc_name(struct be_adapter *adapter)
5343{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305344 char *str = ""; /* default */
5345
5346 switch (adapter->mc_type) {
5347 case UMC:
5348 str = "UMC";
5349 break;
5350 case FLEX10:
5351 str = "FLEX10";
5352 break;
5353 case vNIC1:
5354 str = "vNIC-1";
5355 break;
5356 case nPAR:
5357 str = "nPAR";
5358 break;
5359 case UFP:
5360 str = "UFP";
5361 break;
5362 case vNIC2:
5363 str = "vNIC-2";
5364 break;
5365 default:
5366 str = "";
5367 }
5368
5369 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005370}
5371
/* Return "PF" or "VF" depending on the PCI function's personality */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5376
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005377static inline char *nic_name(struct pci_dev *pdev)
5378{
5379 switch (pdev->device) {
5380 case OC_DEVICE_ID1:
5381 return OC_NAME;
5382 case OC_DEVICE_ID2:
5383 return OC_NAME_BE;
5384 case OC_DEVICE_ID3:
5385 case OC_DEVICE_ID4:
5386 return OC_NAME_LANCER;
5387 case BE_DEVICE_ID2:
5388 return BE3_NAME;
5389 case OC_DEVICE_ID5:
5390 case OC_DEVICE_ID6:
5391 return OC_NAME_SH;
5392 default:
5393 return BE_NAME;
5394 }
5395}
5396
/* PCI probe callback: bring up a be2net device end-to-end - enable the PCI
 * device, allocate the netdev/adapter, set the DMA mask, map BARs,
 * allocate driver resources, configure the HW (be_setup), register the
 * netdev and start the ancillary services (RoCE, error detection, hwmon).
 * Errors unwind via the goto chain in reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: failure here is not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5492
/* Legacy PM suspend hook: mask interrupts, stop the error-detection
 * worker, tear down driver state and put the PCI device into the
 * requested low-power state.  be_pci_resume() undoes this.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5507
Kalesh AP484d76f2015-02-23 04:20:14 -05005508static int be_pci_resume(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005509{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005510 struct be_adapter *adapter = pci_get_drvdata(pdev);
Kalesh AP484d76f2015-02-23 04:20:14 -05005511 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005512
5513 status = pci_enable_device(pdev);
5514 if (status)
5515 return status;
5516
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005517 pci_restore_state(pdev);
5518
Kalesh AP484d76f2015-02-23 04:20:14 -05005519 status = be_resume(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005520 if (status)
5521 return status;
5522
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305523 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005524
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005525 return 0;
5526}
5527
Sathya Perla82456b02010-02-17 01:35:37 +00005528/*
5529 * An FLR will stop BE from DMAing any data.
5530 */
5531static void be_shutdown(struct pci_dev *pdev)
5532{
5533 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005534
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005535 if (!adapter)
5536 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005537
Devesh Sharmad114f992014-06-10 19:32:15 +05305538 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005539 cancel_delayed_work_sync(&adapter->work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005540 be_cancel_err_detection(adapter);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005541
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005542 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005543
Ajit Khaparde57841862011-04-06 18:08:43 +00005544 be_cmd_reset_function(adapter);
5545
Sathya Perla82456b02010-02-17 01:35:37 +00005546 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005547}
5548
Sathya Perlacf588472010-02-14 21:22:01 +00005549static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305550 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005551{
5552 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005553
5554 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5555
Padmanabh Ratnakar68f22792016-02-18 03:09:34 +05305556 be_roce_dev_remove(adapter);
5557
Venkata Duvvuru954f6822015-05-13 13:00:13 +05305558 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5559 be_set_error(adapter, BE_ERROR_EEH);
Sathya Perlacf588472010-02-14 21:22:01 +00005560
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005561 be_cancel_err_detection(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005562
Kalesh AP87ac1a52015-02-23 04:20:15 -05005563 be_cleanup(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005564 }
Sathya Perlacf588472010-02-14 21:22:01 +00005565
5566 if (state == pci_channel_io_perm_failure)
5567 return PCI_ERS_RESULT_DISCONNECT;
5568
5569 pci_disable_device(pdev);
5570
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005571 /* The error could cause the FW to trigger a flash debug dump.
5572 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005573 * can cause it not to recover; wait for it to finish.
5574 * Wait only for first function as it is needed only once per
5575 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005576 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005577 if (pdev->devfn == 0)
5578 ssleep(30);
5579
Sathya Perlacf588472010-02-14 21:22:01 +00005580 return PCI_ERS_RESULT_NEED_RESET;
5581}
5582
/* EEH callback: the slot has been reset. Re-enable the device and wait
 * for the firmware to become ready before recovery continues in
 * be_eeh_resume().
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear stale AER status and the driver's EEH error flags so
	 * normal operation can resume.
	 */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5608
5609static void be_eeh_resume(struct pci_dev *pdev)
5610{
5611 int status = 0;
5612 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005613
5614 dev_info(&adapter->pdev->dev, "EEH resume\n");
5615
5616 pci_save_state(pdev);
5617
Kalesh AP484d76f2015-02-23 04:20:14 -05005618 status = be_resume(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00005619 if (status)
5620 goto err;
5621
Padmanabh Ratnakar68f22792016-02-18 03:09:34 +05305622 be_roce_dev_add(adapter);
5623
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305624 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
Sathya Perlacf588472010-02-14 21:22:01 +00005625 return;
5626err:
5627 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005628}
5629
Vasundhara Volamace40af2015-03-04 00:44:34 -05005630static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5631{
5632 struct be_adapter *adapter = pci_get_drvdata(pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005633 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05005634 int status;
5635
5636 if (!num_vfs)
5637 be_vf_clear(adapter);
5638
5639 adapter->num_vfs = num_vfs;
5640
5641 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5642 dev_warn(&pdev->dev,
5643 "Cannot disable VFs while they are assigned\n");
5644 return -EBUSY;
5645 }
5646
5647 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5648 * are equally distributed across the max-number of VFs. The user may
5649 * request only a subset of the max-vfs to be enabled.
5650 * Based on num_vfs, redistribute the resources across num_vfs so that
5651 * each VF will have access to more number of resources.
5652 * This facility is not available in BE3 FW.
5653 * Also, this is done by FW in Lancer chip.
5654 */
5655 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005656 be_calculate_vf_res(adapter, adapter->num_vfs,
5657 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05005658 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005659 adapter->num_vfs, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05005660 if (status)
5661 dev_err(&pdev->dev,
5662 "Failed to optimize SR-IOV resources\n");
5663 }
5664
5665 status = be_get_resources(adapter);
5666 if (status)
5667 return be_cmd_status(status);
5668
5669 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5670 rtnl_lock();
5671 status = be_update_queues(adapter);
5672 rtnl_unlock();
5673 if (status)
5674 return be_cmd_status(status);
5675
5676 if (adapter->num_vfs)
5677 status = be_vf_setup(adapter);
5678
5679 if (!status)
5680 return adapter->num_vfs;
5681
5682 return 0;
5683}
5684
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07005685static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00005686 .error_detected = be_eeh_err_detected,
5687 .slot_reset = be_eeh_reset,
5688 .resume = be_eeh_resume,
5689};
5690
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005691static struct pci_driver be_driver = {
5692 .name = DRV_NAME,
5693 .id_table = be_dev_ids,
5694 .probe = be_probe,
5695 .remove = be_remove,
5696 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05005697 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005698 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05005699 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00005700 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005701};
5702
5703static int __init be_init_module(void)
5704{
Joe Perches8e95a202009-12-03 07:58:21 +00005705 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5706 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005707 printk(KERN_WARNING DRV_NAME
5708 " : Module param rx_frag_size must be 2048/4096/8192."
5709 " Using 2048\n");
5710 rx_frag_size = 2048;
5711 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005712
Vasundhara Volamace40af2015-03-04 00:44:34 -05005713 if (num_vfs > 0) {
5714 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5715 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5716 }
5717
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005718 return pci_register_driver(&be_driver);
5719}
5720module_init(be_init_module);
5721
/* Module exit: unregister the PCI driver (triggers be_remove per device). */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
5726module_exit(be_exit_module);