blob: 1a7f8ad7b9c6111ea2f8839a5d28c82af1ef13a8 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053044/* Per-module error detection/recovery workq shared across all functions.
45 * Each function schedules its own work request on this shared workq.
46 */
Wei Yongjune6053dd2016-09-25 15:40:36 +000047static struct workqueue_struct *be_err_recovery_workq;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053048
/* PCI device IDs claimed by this driver: BE2/BE3 ASICs (ServerEngines
 * vendor id) and the OneConnect/Skyhawk family (Emulex vendor id).
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
Sathya Perlab7172412016-07-27 05:26:18 -040061
62/* Workqueue used by all functions for defering cmd calls to the adapter */
Wei Yongjune6053dd2016-09-25 15:40:36 +000063static struct workqueue_struct *be_wq;
Sathya Perlab7172412016-07-27 05:26:18 -040064
/* UE Status Low CSR: human-readable name for each bit position, indexed by
 * bit number.  Used when decoding an Unrecoverable Error status register.
 * NOTE(review): trailing spaces in some entries are part of the original
 * strings and are preserved as-is.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +0530100
/* UE Status High CSR: human-readable name for each bit position, indexed by
 * bit number (companion table to ue_status_low_desc).
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
/* Interface capability flags enabled when creating a VF's if_handle */
#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
141
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530145
Sathya Perla1cfafab2012-02-23 18:50:15 +0000146 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
148 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000149 mem->va = NULL;
150 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151}
152
153static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530154 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
156 struct be_dma_mem *mem = &q->dma_mem;
157
158 memset(q, 0, sizeof(*q));
159 q->len = len;
160 q->entry_size = entry_size;
161 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700162 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
163 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000165 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return 0;
167}
168
/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * PCI-config-space membar control register.  Used as the fallback when the
 * FW INTR_SET command is unavailable or fails (see be_intr_set()).
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	/* Read-modify-write of the interrupt control dword */
	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state; skip the write */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
187
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188static void be_intr_set(struct be_adapter *adapter, bool enable)
189{
190 int status = 0;
191
192 /* On lancer interrupts can't be controlled via this register */
193 if (lancer_chip(adapter))
194 return;
195
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530196 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000197 return;
198
199 status = be_cmd_intr_set(adapter, enable);
200 if (status)
201 be_reg_intr_set(adapter, enable);
202}
203
/* Ring the RX-queue doorbell: tell HW that @posted new RX buffers are
 * available on queue @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	/* Don't touch HW registers once a HW error has been detected */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Ensure the RX descriptors are visible in memory before the
	 * doorbell write makes HW consume them.
	 */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
217
/* Ring the TX-queue doorbell: tell HW that @posted new TX WRBs have been
 * placed on @txo's queue.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	/* Don't touch HW registers once a HW error has been detected */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Ensure WRB writes reach memory before HW is told to fetch them */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
232
/* Ring the event-queue doorbell for EQ @qid.
 * @arm: re-arm the EQ for further interrupts
 * @clear_int: clear the pending interrupt
 * @num_popped: number of EQ entries the driver has consumed
 * @eq_delay_mult_enc: encoded interrupt-delay multiplier (adaptive
 *                     interrupt moderation)
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch HW registers once a HW error has been detected */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;	/* this doorbell targets an event queue */
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
254
/* Ring the completion-queue doorbell for CQ @qid: acknowledge @num_popped
 * consumed CQ entries and optionally (@arm) re-arm the CQ.
 * Non-static: also used by other be2net source files.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Don't touch HW registers once a HW error has been detected */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
271
Suresh Reddy988d44b2016-09-07 19:57:52 +0530272static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
273{
274 int i;
275
276 /* Check if mac has already been added as part of uc-list */
277 for (i = 0; i < adapter->uc_macs; i++) {
Ivan Vecera1d0f1102017-01-06 20:30:02 +0100278 if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +0530279 /* mac already added, skip addition */
280 adapter->pmac_id[0] = adapter->pmac_id[i + 1];
281 return 0;
282 }
283 }
284
285 return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
286 &adapter->pmac_id[0], 0);
287}
288
289static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
290{
291 int i;
292
293 /* Skip deletion if the programmed mac is
294 * being used in uc-list
295 */
296 for (i = 0; i < adapter->uc_macs; i++) {
297 if (adapter->pmac_id[i + 1] == pmac_id)
298 return;
299 }
300 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
301}
302
/* ndo_set_mac_address handler: change the device's MAC address.
 *
 * @p points at a struct sockaddr carrying the new address.  Returns 0 on
 * success, -EADDRNOTAVAIL for an invalid address, -EPERM when the change
 * is not permitted (or was silently rejected by the PF), or a FW command
 * status.
 *
 * Order matters here: the new MAC is programmed first, the old filter is
 * deleted only afterwards, and success is decided only by querying the FW
 * for the currently-active MAC.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	/* Commit the new address to both the driver's cache and the netdev */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
374
Sathya Perlaca34fe32012-11-06 17:48:56 +0000375/* BE2 supports only v0 cmd */
376static void *hw_stats_from_cmd(struct be_adapter *adapter)
377{
378 if (BE2_chip(adapter)) {
379 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
380
381 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500382 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000383 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
384
385 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500386 } else {
387 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
388
389 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000390 }
391}
392
393/* BE2 supports only v0 cmd */
394static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
395{
396 if (BE2_chip(adapter)) {
397 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
398
399 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500400 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000401 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
402
403 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500404 } else {
405 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
406
407 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000408 }
409}
410
411static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000412{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000413 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
414 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
415 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000417 &rxf_stats->port[adapter->port_num];
418 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000419
Sathya Perlaac124ff2011-07-25 19:10:14 +0000420 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 drvs->rx_pause_frames = port_stats->rx_pause_frames;
422 drvs->rx_crc_errors = port_stats->rx_crc_errors;
423 drvs->rx_control_frames = port_stats->rx_control_frames;
424 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
425 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
426 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
427 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
428 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
429 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
430 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
431 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
432 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
433 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
434 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000436 drvs->rx_dropped_header_too_small =
437 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000438 drvs->rx_address_filtered =
439 port_stats->rx_address_filtered +
440 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000441 drvs->rx_alignment_symbol_errors =
442 port_stats->rx_alignment_symbol_errors;
443
444 drvs->tx_pauseframes = port_stats->tx_pauseframes;
445 drvs->tx_controlframes = port_stats->tx_controlframes;
446
447 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000448 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000449 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000450 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000451 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000452 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000453 drvs->forwarded_packets = rxf_stats->forwarded_packets;
454 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000455 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
456 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000457 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
458}
459
/* Copy the BE3 (v1-layout) HW statistics into the driver's generic
 * drv_stats structure.  The HW buffer is byte-swapped in place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* Convert the little-endian HW buffer to CPU order in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
505
/* Copy the Skyhawk-era (v2-layout) HW statistics into the driver's generic
 * drv_stats structure, including RoCE counters when the function supports
 * RoCE.  The HW buffer is byte-swapped in place first.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* Convert the little-endian HW buffer to CPU order in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters exist only in the v2 layout */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
559
/* Copy Lancer per-port (pport) HW statistics into the driver's generic
 * drv_stats structure.  Lancer uses a completely different stats layout
 * from the BEx/Skyhawk families; many 64-bit counters are read via their
 * low 32-bit halves (the *_lo fields).
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* Convert the little-endian HW buffer to CPU order in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer splits address/vlan filtering; report the sum */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000596
Sathya Perla09c1c682011-08-22 19:41:53 +0000597static void accumulate_16bit_val(u32 *acc, u16 val)
598{
599#define lo(x) (x & 0xFFFF)
600#define hi(x) (x & 0xFFFF0000)
601 bool wrapped = val < lo(*acc);
602 u32 newacc = hi(*acc) + val;
603
604 if (wrapped)
605 newacc += 65536;
606 ACCESS_ONCE(*acc) = newacc;
607}
608
Jingoo Han4188e7d2013-08-05 18:02:02 +0900609static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530610 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000611{
612 if (!BEx_chip(adapter))
613 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
614 else
615 /* below erx HW counter can actually wrap around after
616 * 65535. Driver accumulates a 32-bit value
617 */
618 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
619 (u16)erx_stat);
620}
621
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000622void be_parse_stats(struct be_adapter *adapter)
623{
Ajit Khaparde61000862013-10-03 16:16:33 -0500624 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000625 struct be_rx_obj *rxo;
626 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000627 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000628
Sathya Perlaca34fe32012-11-06 17:48:56 +0000629 if (lancer_chip(adapter)) {
630 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000631 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000632 if (BE2_chip(adapter))
633 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500634 else if (BE3_chip(adapter))
635 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000636 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500637 else
638 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000639
Ajit Khaparde61000862013-10-03 16:16:33 -0500640 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000641 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000642 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
643 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000644 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000645 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000646}
647
/* ndo_get_stats64 handler: aggregates per-queue SW counters (read under
 * u64_stats seqcount protection) and FW-reported error counters from
 * adapter->drv_stats into @stats. Returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until a consistent 64-bit snapshot is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		/* same seqcount-retry scheme as the rx loop above */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
715
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000716void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700717{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700718 struct net_device *netdev = adapter->netdev;
719
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000720 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000721 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000722 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700723 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000724
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530725 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000726 netif_carrier_on(netdev);
727 else
728 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200729
730 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700731}
732
Sriharsha Basavapatnaf3d6ad82016-10-09 09:58:52 +0530733static int be_gso_hdr_len(struct sk_buff *skb)
734{
735 if (skb->encapsulation)
736 return skb_inner_transport_offset(skb) +
737 inner_tcp_hdrlen(skb);
738 return skb_transport_offset(skb) + tcp_hdrlen(skb);
739}
740
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500741static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742{
Sathya Perla3c8def92011-06-12 20:01:58 +0000743 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatnaf3d6ad82016-10-09 09:58:52 +0530744 u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
745 /* Account for headers which get duplicated in TSO pkt */
746 u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
Sathya Perla3c8def92011-06-12 20:01:58 +0000747
Sathya Perlaab1594e2011-07-25 19:10:15 +0000748 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000749 stats->tx_reqs++;
Sriharsha Basavapatnaf3d6ad82016-10-09 09:58:52 +0530750 stats->tx_bytes += skb->len + dup_hdr_len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530751 stats->tx_pkts += tx_pkts;
752 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
753 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000754 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700755}
756
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500757/* Returns number of WRBs needed for the skb */
758static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700759{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500760 /* +1 for the header wrb */
761 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700762}
763
764static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
765{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500766 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
767 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
768 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
769 wrb->rsvd0 = 0;
770}
771
772/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
773 * to avoid the swap and shift/mask operations in wrb_fill().
774 */
775static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
776{
777 wrb->frag_pa_hi = 0;
778 wrb->frag_pa_lo = 0;
779 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000780 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781}
782
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000783static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530784 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000785{
786 u8 vlan_prio;
787 u16 vlan_tag;
788
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100789 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000790 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
791 /* If vlan priority provided by OS is NOT in available bmap */
792 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
793 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500794 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000795
796 return vlan_tag;
797}
798
Sathya Perlac9c47142014-03-27 10:46:19 +0530799/* Used only for IP tunnel packets */
800static u16 skb_inner_ip_proto(struct sk_buff *skb)
801{
802 return (inner_ip_hdr(skb)->version == 4) ?
803 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
804}
805
806static u16 skb_ip_proto(struct sk_buff *skb)
807{
808 return (ip_hdr(skb)->version == 4) ?
809 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
810}
811
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530812static inline bool be_is_txq_full(struct be_tx_obj *txo)
813{
814 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
815}
816
817static inline bool be_can_txq_wake(struct be_tx_obj *txo)
818{
819 return atomic_read(&txo->q.used) < txo->q.len / 2;
820}
821
822static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
823{
824 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
825}
826
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530827static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
828 struct sk_buff *skb,
829 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700830{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530831 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000833 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530834 BE_WRB_F_SET(wrb_params->features, LSO, 1);
835 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000836 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530837 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700838 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530839 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530840 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530841 proto = skb_inner_ip_proto(skb);
842 } else {
843 proto = skb_ip_proto(skb);
844 }
845 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530846 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530847 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530848 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700849 }
850
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100851 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530852 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
853 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700854 }
855
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530856 BE_WRB_F_SET(wrb_params->features, CRC, 1);
857}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500858
/* Fill the tx header WRB from the per-pkt wrb_params computed by
 * be_get_wrb_params_from_skb(). The header is zeroed first and then
 * only the requested feature bits are set.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* segmentation-offload (LSO/LSO6) request bits and MSS */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* pkt geometry: WRB count and total byte length */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes the pkt to the BMC as well (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
895
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000896static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530897 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000898{
899 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500900 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000901
Sathya Perla7101e112010-03-22 20:41:12 +0000902
Sathya Perlaf986afc2015-02-06 08:18:43 -0500903 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
904 (u64)le32_to_cpu(wrb->frag_pa_lo);
905 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000906 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500907 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000908 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500909 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000910 }
911}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700912
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530913/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530914static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700915{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530916 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700917
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530918 queue_head_inc(&txo->q);
919 return head;
920}
921
/* Set up the WRB header for xmit. @head is the slot reserved earlier by
 * be_tx_get_wrb_hdr(), before the fragment WRBs were filled in.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW consumes the header WRB in little-endian layout */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* remember the skb in the hdr slot for tx-completion processing;
	 * the slot must be free at this point
	 */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	/* WRBs queued but not yet notified to HW via the doorbell */
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700942
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530943/* Setup a WRB fragment (buffer descriptor) for xmit */
944static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
945 int len)
946{
947 struct be_eth_wrb *wrb;
948 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700949
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530950 wrb = queue_head_node(txq);
951 wrb_fill(wrb, busaddr, len);
952 queue_head_inc(txq);
953}
954
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the start of the failed pkt so the loop below walks
	 * exactly the WRBs that were filled for it
	 */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first mapped buffer (the linear part) was mapped
		 * with dma_map_single(); the rest are page frags
		 */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index to just before the failed pkt */
	txq->head = head;
}
982
983/* Enqueue the given packet for transmit. This routine allocates WRBs for the
984 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
985 * of WRBs used up by the packet.
986 */
987static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
988 struct sk_buff *skb,
989 struct be_wrb_params *wrb_params)
990{
991 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
992 struct device *dev = &adapter->pdev->dev;
993 struct be_queue_info *txq = &txo->q;
994 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530995 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530996 dma_addr_t busaddr;
997 int len;
998
999 head = be_tx_get_wrb_hdr(txo);
1000
1001 if (skb->len > skb->data_len) {
1002 len = skb_headlen(skb);
1003
1004 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
1005 if (dma_mapping_error(dev, busaddr))
1006 goto dma_err;
1007 map_single = true;
1008 be_tx_setup_wrb_frag(txo, busaddr, len);
1009 copied += len;
1010 }
1011
1012 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1013 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1014 len = skb_frag_size(frag);
1015
1016 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1017 if (dma_mapping_error(dev, busaddr))
1018 goto dma_err;
1019 be_tx_setup_wrb_frag(txo, busaddr, len);
1020 copied += len;
1021 }
1022
1023 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1024
1025 be_tx_stats_update(txo, skb);
1026 return wrb_cnt;
1027
1028dma_err:
1029 adapter->drv_stats.dma_map_errors++;
1030 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +00001031 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001032}
1033
/* Non-zero (the raw flag bit) once the FW's QnQ async event has been
 * received; callers use the result only as a boolean.
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
1038
/* Software-insert the VLAN tag (and the outer QnQ tag, if configured) into
 * the pkt data, instead of relying on HW tagging. May reallocate the skb;
 * returns the (possibly new) skb, or NULL on allocation failure — callers
 * must treat NULL as "pkt consumed".
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	/* the pkt data is modified below; make sure we own it */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the pkt data; clear the out-of-band tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1082
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001083static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1084{
1085 struct ethhdr *eh = (struct ethhdr *)skb->data;
1086 u16 offset = ETH_HLEN;
1087
1088 if (eh->h_proto == htons(ETH_P_IPV6)) {
1089 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1090
1091 offset += sizeof(struct ipv6hdr);
1092 if (ip6h->nexthdr != NEXTHDR_TCP &&
1093 ip6h->nexthdr != NEXTHDR_UDP) {
1094 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301095 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001096
1097 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1098 if (ehdr->hdrlen == 0xff)
1099 return true;
1100 }
1101 }
1102 return false;
1103}
1104
1105static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1106{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001107 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001108}
1109
/* Only BE3 chips are subject to the ipv6 extension-header tx stall */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	if (!BE3_chip(adapter))
		return 0;

	return be_ipv6_exthdr_check(skb);
}
1114
/* Apply Lancer/BEx-specific tx workarounds: trim badly padded short pkts,
 * skip HW VLAN tagging where it would corrupt the pkt, and SW-insert VLAN
 * tags where HW tagging could lock up the ASIC. May reallocate the skb;
 * returns the (possibly new) skb, or NULL if the pkt was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the pkt to its IP tot_len (drop the pad bytes).
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1183
/* Apply all HW/FW tx workarounds to the pkt before handing it to HW.
 * Returns the (possibly reallocated) skb, or NULL if the pkt was consumed
 * (already freed) and must not be touched again.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		/* skb_put_padto() frees the skb on failure */
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1215
/* Ring the tx doorbell for all WRBs queued since the last flush. The last
 * queued request is made eventable, and on non-Lancer chips a dummy WRB is
 * appended when an odd number of WRBs is pending.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* account for the dummy WRB in the last request's num_wrb
		 * field (clear, then re-set with count + 1)
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1239
/* OS2BMC related */

/* Helpers that decide which tx pkts must also be forwarded to the BMC
 * (the "mgmt" bit in the tx WRB header). Classification is by dest MAC
 * type, ARP, ICMPv6 ND, and a set of well-known UDP ports, gated by the
 * per-class filter bits the FW reports in adapter->bmc_filt_mask.
 */

/* well-known UDP destination ports used for classification */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1	137
#define NET_BIOS_PORT2	138
#define DHCPV6_RAS_PORT	547

/* non-broadcast multicast pkt, and multicast filtering is off */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

/* broadcast pkt, and broadcast filtering is off */
#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
		(is_multicast_ether_addr(eh->h_dest) && \
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

/* per-class filter bits reported by FW in adapter->bmc_filt_mask */
#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1293
/* Decide whether a copy of this TX skb must also be sent to the BMC
 * (OS2BMC management pass-through).
 * Returns true when the packet class matches a traffic type the BMC
 * wants but is not already filtering for itself.
 * May replace *skb: when the packet is bound for the BMC, any VLAN tag
 * is re-inserted inline into the frame, as the ASIC requires.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* Only multicast/broadcast frames are OS2BMC candidates */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* ICMPv6 neighbour-discovery: forward RA/NA per the BMC filter mask */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS are matched by UDP destination port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1363
/* ndo_start_xmit handler: map the skb into TX WRBs on the queue picked by
 * the stack, optionally enqueue a second copy for the BMC (OS2BMC), stop
 * the subqueue when the TX ring fills up, and ring the doorbell when
 * xmit_more indicates no further packets are pending.
 * Always returns NETDEV_TX_OK; dropped skbs are counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* No more skbs queued behind this one -> flush (notify HW) now */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	/* May linearize/pad/modify the skb; returns NULL if it must be
	 * dropped (the workaround path frees it itself)
	 */
	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* extra reference: the skb is now owned twice */
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1414
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001415static inline bool be_in_all_promisc(struct be_adapter *adapter)
1416{
1417 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1418 BE_IF_FLAGS_ALL_PROMISCUOUS;
1419}
1420
1421static int be_set_vlan_promisc(struct be_adapter *adapter)
1422{
1423 struct device *dev = &adapter->pdev->dev;
1424 int status;
1425
1426 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1427 return 0;
1428
1429 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1430 if (!status) {
1431 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1432 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1433 } else {
1434 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1435 }
1436 return status;
1437}
1438
1439static int be_clear_vlan_promisc(struct be_adapter *adapter)
1440{
1441 struct device *dev = &adapter->pdev->dev;
1442 int status;
1443
1444 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1445 if (!status) {
1446 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1447 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1448 }
1449 return status;
1450}
1451
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
/* Program the current VLAN filter table (adapter->vids) into HW.
 * Falls back to VLAN-promiscuous mode when too many VLANs are configured
 * or when the FW reports it has run out of VLAN filter resources.
 * Caller is expected to hold adapter->rx_filter_lock.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Leaving vlan-promisc: must clear it before programming filters */
	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}
1490
Patrick McHardy80d5c362013-04-19 02:04:28 +00001491static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492{
1493 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001494 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495
Sathya Perlab7172412016-07-27 05:26:18 -04001496 mutex_lock(&adapter->rx_filter_lock);
1497
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001498 /* Packets with VID 0 are always received by Lancer by default */
1499 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001500 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301501
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301502 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001503 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001504
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301505 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301506 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001507
Sathya Perlab7172412016-07-27 05:26:18 -04001508 status = be_vid_config(adapter);
1509done:
1510 mutex_unlock(&adapter->rx_filter_lock);
1511 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001512}
1513
Patrick McHardy80d5c362013-04-19 02:04:28 +00001514static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001515{
1516 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001517 int status = 0;
1518
1519 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001520
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001521 /* Packets with VID 0 are always received by Lancer by default */
1522 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001523 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001524
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301525 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001526 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301527
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301528 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301529 adapter->vlans_added--;
1530
Sathya Perlab7172412016-07-27 05:26:18 -04001531 status = be_vid_config(adapter);
1532done:
1533 mutex_unlock(&adapter->rx_filter_lock);
1534 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001535}
1536
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001537static void be_set_all_promisc(struct be_adapter *adapter)
1538{
1539 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1540 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1541}
1542
1543static void be_set_mc_promisc(struct be_adapter *adapter)
1544{
1545 int status;
1546
1547 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1548 return;
1549
1550 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1551 if (!status)
1552 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1553}
1554
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001555static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001556{
1557 int status;
1558
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001559 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1560 return;
1561
1562 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001563 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001564 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1565}
1566
1567static void be_clear_uc_promisc(struct be_adapter *adapter)
1568{
1569 int status;
1570
1571 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1572 return;
1573
1574 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1575 if (!status)
1576 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1577}
1578
1579/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1580 * We use a single callback function for both sync and unsync. We really don't
1581 * add/remove addresses through this callback. But, we use it to detect changes
1582 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1583 */
1584static int be_uc_list_update(struct net_device *netdev,
1585 const unsigned char *addr)
1586{
1587 struct be_adapter *adapter = netdev_priv(netdev);
1588
1589 adapter->update_uc_list = true;
1590 return 0;
1591}
1592
1593static int be_mc_list_update(struct net_device *netdev,
1594 const unsigned char *addr)
1595{
1596 struct be_adapter *adapter = netdev_priv(netdev);
1597
1598 adapter->update_mc_list = true;
1599 return 0;
1600}
1601
/* Reconcile the netdev multicast list with HW. Decides between mc-promisc
 * mode (IFF_ALLMULTI or too many addresses) and exact-match filtering,
 * caching the mc-list in the adapter under the netdev addr lock, then
 * issuing FW commands outside that lock (they can sleep).
 * Caller is expected to hold adapter->rx_filter_lock.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	/* sync/unsync use the same callback: it only marks the list dirty */
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			/* fall back to mc-promisc if exact filtering failed */
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1652
1653static void be_clear_mc_list(struct be_adapter *adapter)
1654{
1655 struct net_device *netdev = adapter->netdev;
1656
1657 __dev_mc_unsync(netdev, NULL);
1658 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001659 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001660}
1661
Suresh Reddy988d44b2016-09-07 19:57:52 +05301662static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1663{
Ivan Vecera1d0f1102017-01-06 20:30:02 +01001664 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301665 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1666 return 0;
1667 }
1668
Ivan Vecera1d0f1102017-01-06 20:30:02 +01001669 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
Suresh Reddy988d44b2016-09-07 19:57:52 +05301670 adapter->if_handle,
1671 &adapter->pmac_id[uc_idx + 1], 0);
1672}
1673
1674static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1675{
1676 if (pmac_id == adapter->pmac_id[0])
1677 return;
1678
1679 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1680}
1681
/* Reconcile the netdev unicast list with HW. Falls back to uc-promisc
 * when more addresses are configured than pmac slots available
 * (one slot is reserved for the primary MAC), otherwise reprograms the
 * pmac entries. The uc-list is cached under the netdev addr lock; FW
 * commands are issued outside it (they can sleep).
 * Caller is expected to hold adapter->rx_filter_lock.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	/* sync/unsync use the same callback: it only marks the list dirty */
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		/* not enough pmac slots: -1 accounts for the primary MAC */
		uc_promisc = true;
		adapter->update_uc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		/* cache the uc-list in adapter array */
		i = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		/* delete the old pmac entries, then add the cached list;
		 * pmac_id[0] belongs to the primary MAC, hence i + 1
		 */
		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}
1729
1730static void be_clear_uc_list(struct be_adapter *adapter)
1731{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001732 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001733 int i;
1734
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001735 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001736 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301737 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1738
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001739 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301740}
1741
Sathya Perlab7172412016-07-27 05:26:18 -04001742static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743{
Sathya Perlab7172412016-07-27 05:26:18 -04001744 struct net_device *netdev = adapter->netdev;
1745
1746 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001747
1748 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001749 if (!be_in_all_promisc(adapter))
1750 be_set_all_promisc(adapter);
1751 } else if (be_in_all_promisc(adapter)) {
1752 /* We need to re-program the vlan-list or clear
1753 * vlan-promisc mode (if needed) when the interface
1754 * comes out of promisc mode.
1755 */
1756 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001757 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001758
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001759 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001760 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001761
1762 mutex_unlock(&adapter->rx_filter_lock);
1763}
1764
1765static void be_work_set_rx_mode(struct work_struct *work)
1766{
1767 struct be_cmd_work *cmd_work =
1768 container_of(work, struct be_cmd_work, work);
1769
1770 __be_set_rx_mode(cmd_work->adapter);
1771 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001772}
1773
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On BEx chips the old pmac entry must be deleted and a new one added;
 * newer chips support changing the MAC in place via be_cmd_set_mac().
 * Returns 0, -EPERM (SR-IOV off), -EINVAL (bad mac/vf), or a translated
 * FW error.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* cache the active MAC only after the FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1813
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001814static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301815 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001816{
1817 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001818 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001819
Sathya Perla11ac75e2011-12-13 00:58:50 +00001820 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001821 return -EPERM;
1822
Sathya Perla11ac75e2011-12-13 00:58:50 +00001823 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001824 return -EINVAL;
1825
1826 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001827 vi->max_tx_rate = vf_cfg->tx_rate;
1828 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001829 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1830 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001831 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301832 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001833 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001834
1835 return 0;
1836}
1837
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * While TVT is active the VF must not program its own VLAN filters, so
 * any pre-existing guest VLAN filters are cleared and the FILTMGMT
 * privilege is revoked from the VF.
 * Returns 0 on success or a FW error status.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1866
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * FILTMGMT privilege so it may program its own VLAN filters again.
 * The VF's interface must be toggled in the VM for the change to take
 * full effect (hence the dev_info below).
 * Returns 0 on success or a FW error status.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1893
/* ndo_set_vf_vlan handler: set (vlan/qos != 0) or clear (both 0)
 * transparent VLAN tagging for VF @vf. Only 802.1Q tagging is supported.
 * Returns 0, -EPERM, -EINVAL, -EPROTONOSUPPORT, or a translated FW error.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			  __be16 vlan_proto)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan || qos) {
		/* fold the priority bits into the tag before programming */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
1927
/* ndo_set_vf_rate handler: cap the TX rate of VF @vf at @max_tx_rate Mbps
 * (0 clears the cap). min_tx_rate is not supported.
 * A non-zero rate is only accepted when the link is up, lies within
 * [100, link_speed], and on Skyhawk is a multiple of 1% of link speed
 * (the FW takes the QOS value as a percentage there).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* rate 0 means "no limit": skip validation, program directly */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301989
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301990static int be_set_vf_link_state(struct net_device *netdev, int vf,
1991 int link_state)
1992{
1993 struct be_adapter *adapter = netdev_priv(netdev);
1994 int status;
1995
1996 if (!sriov_enabled(adapter))
1997 return -EPERM;
1998
1999 if (vf >= adapter->num_vfs)
2000 return -EINVAL;
2001
2002 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302003 if (status) {
2004 dev_err(&adapter->pdev->dev,
2005 "Link state change on VF %d failed: %#x\n", vf, status);
2006 return be_cmd_status(status);
2007 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302008
Kalesh APabccf232014-07-17 16:20:24 +05302009 adapter->vf_cfg[vf].plink_tracking = link_state;
2010
2011 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302012}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002013
Kalesh APe7bcbd72015-05-06 05:30:32 -04002014static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2015{
2016 struct be_adapter *adapter = netdev_priv(netdev);
2017 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2018 u8 spoofchk;
2019 int status;
2020
2021 if (!sriov_enabled(adapter))
2022 return -EPERM;
2023
2024 if (vf >= adapter->num_vfs)
2025 return -EINVAL;
2026
2027 if (BEx_chip(adapter))
2028 return -EOPNOTSUPP;
2029
2030 if (enable == vf_cfg->spoofchk)
2031 return 0;
2032
2033 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2034
2035 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2036 0, spoofchk);
2037 if (status) {
2038 dev_err(&adapter->pdev->dev,
2039 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2040 return be_cmd_status(status);
2041 }
2042
2043 vf_cfg->spoofchk = enable;
2044 return 0;
2045}
2046
Sathya Perla2632baf2013-10-01 16:00:00 +05302047static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2048 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002049{
Sathya Perla2632baf2013-10-01 16:00:00 +05302050 aic->rx_pkts_prev = rx_pkts;
2051 aic->tx_reqs_prev = tx_pkts;
2052 aic->jiffies = now;
2053}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002054
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002055static int be_get_new_eqd(struct be_eq_obj *eqo)
Sathya Perla2632baf2013-10-01 16:00:00 +05302056{
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002057 struct be_adapter *adapter = eqo->adapter;
2058 int eqd, start;
Sathya Perla2632baf2013-10-01 16:00:00 +05302059 struct be_aic_obj *aic;
Sathya Perla2632baf2013-10-01 16:00:00 +05302060 struct be_rx_obj *rxo;
2061 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002062 u64 rx_pkts = 0, tx_pkts = 0;
Sathya Perla2632baf2013-10-01 16:00:00 +05302063 ulong now;
2064 u32 pps, delta;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002065 int i;
2066
2067 aic = &adapter->aic_obj[eqo->idx];
2068 if (!aic->enable) {
2069 if (aic->jiffies)
2070 aic->jiffies = 0;
2071 eqd = aic->et_eqd;
2072 return eqd;
2073 }
2074
2075 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2076 do {
2077 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2078 rx_pkts += rxo->stats.rx_pkts;
2079 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
2080 }
2081
2082 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2083 do {
2084 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2085 tx_pkts += txo->stats.tx_reqs;
2086 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
2087 }
2088
2089 /* Skip, if wrapped around or first calculation */
2090 now = jiffies;
2091 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2092 rx_pkts < aic->rx_pkts_prev ||
2093 tx_pkts < aic->tx_reqs_prev) {
2094 be_aic_update(aic, rx_pkts, tx_pkts, now);
2095 return aic->prev_eqd;
2096 }
2097
2098 delta = jiffies_to_msecs(now - aic->jiffies);
2099 if (delta == 0)
2100 return aic->prev_eqd;
2101
2102 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2103 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2104 eqd = (pps / 15000) << 2;
2105
2106 if (eqd < 8)
2107 eqd = 0;
2108 eqd = min_t(u32, eqd, aic->max_eqd);
2109 eqd = max_t(u32, eqd, aic->min_eqd);
2110
2111 be_aic_update(aic, rx_pkts, tx_pkts, now);
2112
2113 return eqd;
2114}
2115
2116/* For Skyhawk-R only */
2117static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2118{
2119 struct be_adapter *adapter = eqo->adapter;
2120 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2121 ulong now = jiffies;
2122 int eqd;
2123 u32 mult_enc;
2124
2125 if (!aic->enable)
2126 return 0;
2127
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302128 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002129 eqd = aic->prev_eqd;
2130 else
2131 eqd = be_get_new_eqd(eqo);
2132
2133 if (eqd > 100)
2134 mult_enc = R2I_DLY_ENC_1;
2135 else if (eqd > 60)
2136 mult_enc = R2I_DLY_ENC_2;
2137 else if (eqd > 20)
2138 mult_enc = R2I_DLY_ENC_3;
2139 else
2140 mult_enc = R2I_DLY_ENC_0;
2141
2142 aic->prev_eqd = eqd;
2143
2144 return mult_enc;
2145}
2146
2147void be_eqd_update(struct be_adapter *adapter, bool force_update)
2148{
2149 struct be_set_eqd set_eqd[MAX_EVT_QS];
2150 struct be_aic_obj *aic;
2151 struct be_eq_obj *eqo;
2152 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002153
Sathya Perla2632baf2013-10-01 16:00:00 +05302154 for_all_evt_queues(adapter, eqo, i) {
2155 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002156 eqd = be_get_new_eqd(eqo);
2157 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302158 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2159 set_eqd[num].eq_id = eqo->q.id;
2160 aic->prev_eqd = eqd;
2161 num++;
2162 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002163 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302164
2165 if (num)
2166 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002167}
2168
Sathya Perla3abcded2010-10-03 22:12:27 -07002169static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302170 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002171{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002172 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002173
Sathya Perlaab1594e2011-07-25 19:10:15 +00002174 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002175 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002176 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002177 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302178 if (rxcp->tunneled)
2179 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002180 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002181 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002182 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002183 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002184 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002185}
2186
Sathya Perla2e588f82011-03-11 02:49:26 +00002187static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002188{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002189 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302190 * Also ignore ipcksm for ipv6 pkts
2191 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002192 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302193 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002194}
2195
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302196static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002197{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002198 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002199 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07002200 struct be_queue_info *rxq = &rxo->q;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302201 u32 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002202
Sathya Perla3abcded2010-10-03 22:12:27 -07002203 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002204 BUG_ON(!rx_page_info->page);
2205
Sathya Perlae50287b2014-03-04 12:14:38 +05302206 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002207 dma_unmap_page(&adapter->pdev->dev,
2208 dma_unmap_addr(rx_page_info, bus),
2209 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05302210 rx_page_info->last_frag = false;
2211 } else {
2212 dma_sync_single_for_cpu(&adapter->pdev->dev,
2213 dma_unmap_addr(rx_page_info, bus),
2214 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00002215 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302217 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002218 atomic_dec(&rxq->used);
2219 return rx_page_info;
2220}
2221
2222/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002223static void be_rx_compl_discard(struct be_rx_obj *rxo,
2224 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002227 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002229 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302230 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002231 put_page(page_info->page);
2232 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002233 }
2234}
2235
2236/*
2237 * skb_fill_rx_data forms a complete skb for an ether frame
2238 * indicated by rxcp.
2239 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002240static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2241 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002242{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002244 u16 i, j;
2245 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002246 u8 *start;
2247
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302248 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002249 start = page_address(page_info->page) + page_info->page_offset;
2250 prefetch(start);
2251
2252 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002253 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002254
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255 skb->len = curr_frag_len;
2256 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002257 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002258 /* Complete packet has now been moved to data */
2259 put_page(page_info->page);
2260 skb->data_len = 0;
2261 skb->tail += curr_frag_len;
2262 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002263 hdr_len = ETH_HLEN;
2264 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002265 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00002266 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002267 skb_shinfo(skb)->frags[0].page_offset =
2268 page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05302269 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2270 curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002271 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002272 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002273 skb->tail += hdr_len;
2274 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00002275 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002276
Sathya Perla2e588f82011-03-11 02:49:26 +00002277 if (rxcp->pkt_size <= rx_frag_size) {
2278 BUG_ON(rxcp->num_rcvd != 1);
2279 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002280 }
2281
2282 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002283 remaining = rxcp->pkt_size - curr_frag_len;
2284 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302285 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00002286 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002288 /* Coalesce all frags from the same physical page in one slot */
2289 if (page_info->page_offset == 0) {
2290 /* Fresh page */
2291 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002292 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002293 skb_shinfo(skb)->frags[j].page_offset =
2294 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002295 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002296 skb_shinfo(skb)->nr_frags++;
2297 } else {
2298 put_page(page_info->page);
2299 }
2300
Eric Dumazet9e903e02011-10-18 21:00:24 +00002301 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302 skb->len += curr_frag_len;
2303 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002304 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00002305 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00002306 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002307 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002308 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002309}
2310
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002311/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05302312static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002313 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002314{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002315 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002316 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002317 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00002318
Eric Dumazetbb349bb2012-01-25 03:56:30 +00002319 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00002320 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002321 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002322 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002323 return;
2324 }
2325
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002326 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002327
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002328 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07002329 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00002330 else
2331 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002332
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002333 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002334 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002335 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08002336 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302337
Tom Herbertb6c0e892014-08-27 21:27:17 -07002338 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302339 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002340
Jiri Pirko343e43c2011-08-25 02:50:51 +00002341 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002342 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002343
2344 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002345}
2346
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002347/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09002348static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2349 struct napi_struct *napi,
2350 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002351{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002352 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002353 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002354 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00002355 u16 remaining, curr_frag_len;
2356 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00002357
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002358 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002359 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002360 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002361 return;
2362 }
2363
Sathya Perla2e588f82011-03-11 02:49:26 +00002364 remaining = rxcp->pkt_size;
2365 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302366 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367
2368 curr_frag_len = min(remaining, rx_frag_size);
2369
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002370 /* Coalesce all frags from the same physical page in one slot */
2371 if (i == 0 || page_info->page_offset == 0) {
2372 /* First frag or Fresh page */
2373 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002374 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002375 skb_shinfo(skb)->frags[j].page_offset =
2376 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002377 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002378 } else {
2379 put_page(page_info->page);
2380 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00002381 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002382 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002383 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002384 memset(page_info, 0, sizeof(*page_info));
2385 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002386 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002387
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002388 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00002389 skb->len = rxcp->pkt_size;
2390 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002391 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002392 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00002393 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c8c2013-12-17 23:23:51 -08002394 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302395
Tom Herbertb6c0e892014-08-27 21:27:17 -07002396 skb->csum_level = rxcp->tunneled;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002397
Jiri Pirko343e43c2011-08-25 02:50:51 +00002398 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002399 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002400
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002401 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002402}
2403
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002404static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2405 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002406{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302407 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2408 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2409 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2410 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2411 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2412 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2413 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2414 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2415 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2416 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2417 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002418 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302419 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2420 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002421 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302422 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302423 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302424 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002425}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002426
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002427static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2428 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002429{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302430 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2431 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2432 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2433 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2434 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2435 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2436 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2437 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2438 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2439 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2440 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002441 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302442 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2443 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002444 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302445 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2446 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002447}
2448
/* Fetch the next valid Rx completion from the CQ, parse it into
 * rxo->rxcp and return it; returns NULL when the CQ is empty.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Order the valid-bit read before reading the rest of the entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* Ignore the l4 csum verdict for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Byte-swap the tag on all chips except Lancer */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless that VID was
		 * explicitly configured on the interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2493
Eric Dumazet1829b082011-03-01 05:48:12 +00002494static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002495{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002497
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002498 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002499 gfp |= __GFP_COMP;
2500 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002501}
2502
2503/*
2504 * Allocate a page, split it to fragments of size rx_frag_size and post as
2505 * receive buffers to BE
2506 */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302507static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002508{
Sathya Perla3abcded2010-10-03 22:12:27 -07002509 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08002510 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07002511 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002512 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002513 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002514 struct be_eth_rx_d *rxd;
2515 u64 page_dmaaddr = 0, frag_dmaaddr;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302516 u32 posted, page_offset = 0, notify = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002517
Sathya Perla3abcded2010-10-03 22:12:27 -07002518 page_info = &rxo->page_info_tbl[rxq->head];
Ajit Khapardec30d7262014-09-12 17:39:16 +05302519 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002520 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002521 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002522 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002523 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002524 break;
2525 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01002526 page_dmaaddr = dma_map_page(dev, pagep, 0,
2527 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002528 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01002529 if (dma_mapping_error(dev, page_dmaaddr)) {
2530 put_page(pagep);
2531 pagep = NULL;
Vasundhara Volamd3de1542014-09-02 09:56:50 +05302532 adapter->drv_stats.dma_map_errors++;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002533 break;
2534 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302535 page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002536 } else {
2537 get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05302538 page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002539 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302540 page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002541 page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002542
2543 rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05302544 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002545 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2546 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002547
2548 /* Any space left in the current big page for another frag? */
2549 if ((page_offset + rx_frag_size + rx_frag_size) >
2550 adapter->big_page_size) {
2551 pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05302552 page_info->last_frag = true;
2553 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2554 } else {
2555 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002556 }
Sathya Perla26d92f92010-01-21 22:52:08 -08002557
2558 prev_page_info = page_info;
2559 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002560 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002561 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302562
2563 /* Mark the last frag of a page when we break out of the above loop
2564 * with no more slots available in the RXQ
2565 */
2566 if (pagep) {
2567 prev_page_info->last_frag = true;
2568 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2569 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002570
2571 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002572 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302573 if (rxo->rx_post_starved)
2574 rxo->rx_post_starved = false;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302575 do {
Ajit Khaparde69304cc2015-04-08 16:59:48 -05002576 notify = min(MAX_NUM_POST_ERX_DB, posted);
Ajit Khapardec30d7262014-09-12 17:39:16 +05302577 be_rxq_notify(adapter, rxq->id, notify);
2578 posted -= notify;
2579 } while (posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07002580 } else if (atomic_read(&rxq->used) == 0) {
2581 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07002582 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002583 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002584}
2585
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302586static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002587{
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302588 struct be_queue_info *tx_cq = &txo->cq;
2589 struct be_tx_compl_info *txcp = &txo->txcp;
2590 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002591
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302592 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002593 return NULL;
2594
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302595 /* Ensure load ordering of valid bit dword and other dwords below */
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002596 rmb();
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302597 be_dws_le_to_cpu(compl, sizeof(*compl));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002598
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302599 txcp->status = GET_TX_COMPL_BITS(status, compl);
2600 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002601
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302602 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002603 queue_tail_inc(tx_cq);
2604 return txcp;
2605}
2606
Sathya Perla3c8def92011-06-12 20:01:58 +00002607static u16 be_tx_compl_process(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302608 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002609{
Sathya Perla3c8def92011-06-12 20:01:58 +00002610 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002611 struct be_queue_info *txq = &txo->q;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002612 struct sk_buff *skb = NULL;
2613 bool unmap_skb_hdr = false;
2614 struct be_eth_wrb *wrb;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302615 u16 num_wrbs = 0;
2616 u32 frag_index;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002617
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002618 do {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002619 if (sent_skbs[txq->tail]) {
2620 /* Free skb from prev req */
2621 if (skb)
2622 dev_consume_skb_any(skb);
2623 skb = sent_skbs[txq->tail];
2624 sent_skbs[txq->tail] = NULL;
2625 queue_tail_inc(txq); /* skip hdr wrb */
2626 num_wrbs++;
2627 unmap_skb_hdr = true;
2628 }
Alexander Duycka73b7962009-12-02 16:48:18 +00002629 wrb = queue_tail_node(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002630 frag_index = txq->tail;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002631 unmap_tx_frag(&adapter->pdev->dev, wrb,
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002632 (unmap_skb_hdr && skb_headlen(skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002633 unmap_skb_hdr = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002634 queue_tail_inc(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002635 num_wrbs++;
2636 } while (frag_index != last_index);
2637 dev_consume_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002638
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002639 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002640}
2641
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		/* evt == 0 means the entry has not been written by HW yet */
		if (eqe->evt == 0)
			break;

		/* Order the read of eqe->evt above against the clear below */
		rmb();
		/* Clear the entry so it can be reused by HW */
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2661
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002662/* Leaves the EQ is disarmed state */
2663static void be_eq_clean(struct be_eq_obj *eqo)
2664{
2665 int num = events_get(eqo);
2666
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002667 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002668}
2669
Kalesh AP99b44302015-08-05 03:27:49 -04002670/* Free posted rx buffers that were not used */
2671static void be_rxq_clean(struct be_rx_obj *rxo)
2672{
2673 struct be_queue_info *rxq = &rxo->q;
2674 struct be_rx_page_info *page_info;
2675
2676 while (atomic_read(&rxq->used) > 0) {
2677 page_info = get_rx_page_info(rxo);
2678 put_page(page_info->page);
2679 memset(page_info, 0, sizeof(*page_info));
2680 }
2681 BUG_ON(atomic_read(&rxq->used));
2682 rxq->tail = 0;
2683 rxq->head = 0;
2684}
2685
/* Drain all pending RX completions from the CQ during queue teardown. */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms of waiting, or immediately if
			 * the HW is already in an error state.
			 */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* Re-arm the CQ to nudge HW into flushing partially
			 * coalesced entries.
			 */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2725
/* Drain TX completions from all TX queues during teardown, then reclaim
 * any WRBs that were enqueued but never notified to HW.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the 10ms
				 * silence timer.
				 */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2792
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002793static void be_evt_queues_destroy(struct be_adapter *adapter)
2794{
2795 struct be_eq_obj *eqo;
2796 int i;
2797
2798 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002799 if (eqo->q.created) {
2800 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002801 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302802 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002803 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002804 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002805 be_queue_free(adapter, &eqo->q);
2806 }
2807}
2808
/* Allocate and create all event queues, set up adaptive interrupt
 * coalescing defaults, IRQ affinity hints and NAPI contexts.
 * On failure returns a negative errno; the caller is expected to
 * clean up any partially created queues.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Enable adaptive interrupt coalescing by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQ affinity hints across the local NUMA node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2849
Sathya Perla5fb379e2009-06-18 00:02:59 +00002850static void be_mcc_queues_destroy(struct be_adapter *adapter)
2851{
2852 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002853
Sathya Perla8788fdc2009-07-27 22:52:03 +00002854 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002855 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002856 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002857 be_queue_free(adapter, q);
2858
Sathya Perla8788fdc2009-07-27 22:52:03 +00002859 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002860 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002862 be_queue_free(adapter, q);
2863}
2864
2865/* Must be called only after TX qs are created as MCC shares TX EQ */
2866static int be_mcc_queues_create(struct be_adapter *adapter)
2867{
2868 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002869
Sathya Perla8788fdc2009-07-27 22:52:03 +00002870 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002871 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302872 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002873 goto err;
2874
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002875 /* Use the default EQ for MCC completions */
2876 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002877 goto mcc_cq_free;
2878
Sathya Perla8788fdc2009-07-27 22:52:03 +00002879 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002880 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2881 goto mcc_cq_destroy;
2882
Sathya Perla8788fdc2009-07-27 22:52:03 +00002883 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002884 goto mcc_q_free;
2885
2886 return 0;
2887
2888mcc_q_free:
2889 be_queue_free(adapter, q);
2890mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002891 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002892mcc_cq_free:
2893 be_queue_free(adapter, cq);
2894err:
2895 return -1;
2896}
2897
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002898static void be_tx_queues_destroy(struct be_adapter *adapter)
2899{
2900 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002901 struct be_tx_obj *txo;
2902 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002903
Sathya Perla3c8def92011-06-12 20:01:58 +00002904 for_all_tx_queues(adapter, txo, i) {
2905 q = &txo->q;
2906 if (q->created)
2907 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2908 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002909
Sathya Perla3c8def92011-06-12 20:01:58 +00002910 q = &txo->cq;
2911 if (q->created)
2912 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2913 be_queue_free(adapter, q);
2914 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002915}
2916
/* Allocate and create all TX queues and their completion queues, binding
 * each TX-CQ to an event queue (round-robin when there are fewer EQs than
 * TXQs) and setting XPS affinity per EQ.
 * Returns 0 on success or a negative errno; partially created queues are
 * cleaned up by the caller.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from the EQ's CPUs to this queue */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2961
2962static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002963{
2964 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002965 struct be_rx_obj *rxo;
2966 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002967
Sathya Perla3abcded2010-10-03 22:12:27 -07002968 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002969 q = &rxo->cq;
2970 if (q->created)
2971 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2972 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002973 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002974}
2975
/* Decide how many RX queues (RSS + optional default RXQ) to use and create
 * one completion queue per RX object, binding CQs to event queues
 * round-robin. Returns 0 on success or a negative errno.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Bind this CQ to an EQ round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
3017
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003018static irqreturn_t be_intx(int irq, void *dev)
3019{
Sathya Perlae49cc342012-11-27 19:50:02 +00003020 struct be_eq_obj *eqo = dev;
3021 struct be_adapter *adapter = eqo->adapter;
3022 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003023
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003024 /* IRQ is not expected when NAPI is scheduled as the EQ
3025 * will not be armed.
3026 * But, this can happen on Lancer INTx where it takes
3027 * a while to de-assert INTx or in BE2 where occasionaly
3028 * an interrupt may be raised even when EQ is unarmed.
3029 * If NAPI is already scheduled, then counting & notifying
3030 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00003031 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003032 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003033 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003034 __napi_schedule(&eqo->napi);
3035 if (num_evts)
3036 eqo->spurious_intr = 0;
3037 }
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003038 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003039
3040 /* Return IRQ_HANDLED only for the the first spurious intr
3041 * after a valid intr to stop the kernel from branding
3042 * this irq as a bad one!
3043 */
3044 if (num_evts || eqo->spurious_intr++ == 0)
3045 return IRQ_HANDLED;
3046 else
3047 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003048}
3049
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003050static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003051{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003052 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003053
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003054 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00003055 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003056 return IRQ_HANDLED;
3057}
3058
Sathya Perla2e588f82011-03-11 02:49:26 +00003059static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003060{
Somnath Koture38b1702013-05-29 22:55:56 +00003061 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003062}
3063
/* Consume up to @budget RX completions from the rx-obj's CQ, delivering
 * packets via GRO or the regular path, and replenish RX buffers when the
 * ring runs low. Returns the number of completions processed.
 * @polling distinguishes NAPI polling from busy-polling.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Notify consumed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3123
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303124static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303125{
3126 switch (status) {
3127 case BE_TX_COMP_HDR_PARSE_ERR:
3128 tx_stats(txo)->tx_hdr_parse_err++;
3129 break;
3130 case BE_TX_COMP_NDMA_ERR:
3131 tx_stats(txo)->tx_dma_err++;
3132 break;
3133 case BE_TX_COMP_ACL_ERR:
3134 tx_stats(txo)->tx_spoof_check_err++;
3135 break;
3136 }
3137}
3138
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303139static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303140{
3141 switch (status) {
3142 case LANCER_TX_COMP_LSO_ERR:
3143 tx_stats(txo)->tx_tso_err++;
3144 break;
3145 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3146 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3147 tx_stats(txo)->tx_spoof_check_err++;
3148 break;
3149 case LANCER_TX_COMP_QINQ_ERR:
3150 tx_stats(txo)->tx_qinq_err++;
3151 break;
3152 case LANCER_TX_COMP_PARITY_ERR:
3153 tx_stats(txo)->tx_internal_parity_err++;
3154 break;
3155 case LANCER_TX_COMP_DMA_ERR:
3156 tx_stats(txo)->tx_dma_err++;
3157 break;
3158 }
3159}
3160
/* Reap TX completions for one TX queue (netdev subqueue @idx), update error
 * stats, and wake the subqueue if it was flow-controlled.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* Error encoding differs between Lancer and
			 * BE-family chips.
			 */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003195
#ifdef CONFIG_NET_RX_BUSY_POLL
/* The helpers below arbitrate per-EQ ownership between the NAPI poll path
 * and the busy-poll path via eqo->lock and the eqo->state flags.
 * When CONFIG_NET_RX_BUSY_POLL is off, stubs below make NAPI always win.
 */

/* Try to claim the EQ for NAPI; returns false if busy-poll holds it
 * (and records the yield in eqo->state).
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI's claim on the EQ */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling; returns false if NAPI holds it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll's claim on the EQ */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queueus.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* No busy-poll support: NAPI always acquires the EQ */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3295
/* NAPI poll handler for one EQ: reap TX completions, process RX up to
 * @budget, service MCC completions (on the MCC EQ), and rearm the EQ only
 * when the budget was not exhausted. Returns the RX work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns this EQ; report full budget so NAPI
		 * keeps polling.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3344
Sathya Perla6384a4d2013-10-25 10:40:16 +05303345#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) RX handler for one EQ: processes up to 4 RX
 * completions from the first RX queue on this EQ that has any work.
 * Returns LL_FLUSH_BUSY if NAPI currently owns the EQ, else the number
 * of completions processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	/* mutually exclusive with the NAPI path (be_lock_napi in be_poll) */
	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
3365#endif
3366
/* Poll the adapter's error registers and flag/log any detected errors.
 * Lancer chips report errors via the SLIPORT status registers; other chips
 * (BEx/Skyhawk) expose Unrecoverable Error (UE) bitmaps in PCI config space.
 * Does nothing once a HW error has already been latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* masked-off bits are not real errors; drop them */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev, "Error detected in the adapter");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			/* log a line per set bit, named via the desc tables */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3433
Sathya Perla8d56ff12009-11-22 22:02:26 +00003434static void be_msix_disable(struct be_adapter *adapter)
3435{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003436 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003437 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003438 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303439 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003440 }
3441}
3442
/* Enable MSI-X and split the granted vectors between NIC and RoCE.
 * Returns 0 on success. If MSI-X enabling fails: returns the error only
 * for VFs (which cannot fall back to INTx); PFs return 0 so the caller
 * can continue with INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	unsigned int i, max_roce_eqs;
	struct device *dev = &adapter->pdev->dev;
	int num_vec;

	/* If RoCE is supported, program the max number of vectors that
	 * could be used for NIC and RoCE, else, just program the number
	 * we'll use initially.
	 */
	if (be_roce_supported(adapter)) {
		max_roce_eqs =
			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
	} else {
		num_vec = max(adapter->cfg_num_rx_irqs,
			      adapter->cfg_num_tx_irqs);
	}

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* may grant fewer vectors than requested, but never fewer than
	 * MIN_MSIX_VECTORS
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* give RoCE half of the granted vectors; the NIC keeps the rest */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3491
/* Return the MSI-X vector (IRQ number) assigned to the given EQ object */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3497
/* Request one MSI-X IRQ per event queue and set its CPU affinity hint.
 * On failure, frees every IRQ requested so far (in reverse order) and
 * disables MSI-X before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* per-queue IRQ name, e.g. "eth0-q0" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* unwind only the IRQs that were successfully requested */
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3525
/* Register interrupt handlers: MSI-X when enabled, otherwise a shared
 * INTx handler on EQ0. VFs cannot fall back to INTx, so an MSI-X
 * registration failure is fatal for them. Sets isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3553
/* Undo be_irq_register(): free the INTx IRQ, or for MSI-X clear each
 * vector's affinity hint and free it. Clears isr_registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		/* drop the affinity hint before freeing the IRQ */
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3579
/* Destroy all RX queues: drain/destroy each created RXQ (with a Lancer
 * workaround that re-posts buffers first), free the queue memory, and
 * disable RSS if it was configured.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	/* turn RSS off so a later be_rx_qs_create() starts clean */
	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3616
/* Remove interface filters on close: delete the primary MAC, clear the
 * unicast and multicast lists, and (Lancer only) clear the IFACE RX-filter
 * flags. See the comment below for why the flags are left alone on
 * BE3/Skyhawk-R.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT))
		be_dev_mac_del(adapter, adapter->pmac_id[0]);

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW.  As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is * used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3646
/* ndo_stop handler: quiesce the adapter in a strict order — flush pending
 * config work, drop filters, disable NAPI/busy-poll, stop MCC, drain TX,
 * destroy RX queues, clean each EQ (after syncing its IRQ), and finally
 * unregister the interrupt handlers. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* make sure no in-flight interrupt is touching this EQ
		 * before cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3696
/* Allocate and create all RX queues (default RXQ plus RSS queues),
 * program the RSS indirection table and hash key when more than one RXQ
 * exists, and pre-post RX buffers to every queue.
 * Returns 0 on success or a negative/command error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* create the non-RSS default RXQ only when needed */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table by cycling through the RSS
		 * queues until all RSS_INDIR_TABLE_LEN slots are set
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only beyond the BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}


	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3767
/* Re-enable interface filters on open: basic RX-filter flags, the primary
 * MAC (where privileged), configured VLANs, and the RX mode (uc/mc lists).
 * Returns 0 on success or the first command error.
 */
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* Don't add MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;
		/* remember the MAC currently programmed in HW */
		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}
3792
/* ndo_open handler: bring the interface up — create RX queues, enable
 * filters, register IRQs, arm CQs/EQs, enable NAPI and the MCC path,
 * report link state and start the TX queues.
 * On any failure, tears everything down via be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* arm the RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	/* ask the stack to replay tunnel (vxlan) port notifications */
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3842
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003843static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3844{
3845 u32 addr;
3846
3847 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3848
3849 mac[5] = (u8)(addr & 0xFF);
3850 mac[4] = (u8)((addr >> 8) & 0xFF);
3851 mac[3] = (u8)((addr >> 16) & 0xFF);
3852 /* Use the OUI from the current MAC address */
3853 memcpy(mac, adapter->netdev->dev_addr, 3);
3854}
3855
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a PMAC entry; newer chips set the MAC
		 * directly on the VF's interface
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next address; NOTE(review): mac[5]
		 * can wrap without carrying into mac[4] — presumably
		 * num_vfs stays small enough; confirm
		 */
		mac[5] += 1;
	}
	/* returns the status of the last VF attempted */
	return status;
}
3891
Sathya Perla4c876612013-02-03 20:30:11 +00003892static int be_vfs_mac_query(struct be_adapter *adapter)
3893{
3894 int status, vf;
3895 u8 mac[ETH_ALEN];
3896 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003897
3898 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303899 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3900 mac, vf_cfg->if_handle,
3901 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003902 if (status)
3903 return status;
3904 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3905 }
3906 return 0;
3907}
3908
/* Tear down SR-IOV: unless VFs are still assigned to VMs, disable SR-IOV,
 * delete each VF's MAC and interface, and restore BE3 switch forwarding.
 * Always frees the vf_cfg array and clears the SR-IOV state.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	/* can't yank resources out from under VFs a VM is still using */
	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx used a PMAC entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3942
/* Destroy all adapter queues; order matters: MCC first, then RX CQs,
 * TX queues, and finally the event queues they all feed into.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3950
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303951static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003952{
Sathya Perla191eb752012-02-23 18:50:13 +00003953 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3954 cancel_delayed_work_sync(&adapter->work);
3955 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3956 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303957}
3958
/* Cancel the scheduled error-detection work, if its workqueue exists and
 * the work was scheduled, and clear the scheduled flag.
 */
static void be_cancel_err_detection(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	/* nothing to cancel if the recovery workqueue was never created */
	if (!be_err_recovery_workq)
		return;

	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
		cancel_delayed_work_sync(&err_rec->err_detection_work);
		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
	}
}
3971
/* Turn off VxLAN offloads: revert the tunnel iface mode, clear the
 * programmed VxLAN port, and strip the tunnel GSO feature bits from the
 * netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* drop the tunnel segmentation offload advertisement */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
3990
/* Compute the per-VF resource template (@vft_res) used when issuing the
 * SET_PROFILE_CONFIG cmd: the PF-pool resources in adapter->pool_res are
 * split between the PF and @num_vfs VFs. Only fields that FW reports as
 * modifiable (via GET_PROFILE_CONFIG/RESOURCE_MODIFIABLE) are distributed.
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and its VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than its PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	/* Similarly, strip VLAN-promiscuous capability from VFs when the
	 * field is modifiable.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and its VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
4066
Sathya Perlab7172412016-07-27 05:26:18 -04004067static void be_if_destroy(struct be_adapter *adapter)
4068{
4069 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4070
4071 kfree(adapter->pmac_id);
4072 adapter->pmac_id = NULL;
4073
4074 kfree(adapter->mc_list);
4075 adapter->mc_list = NULL;
4076
4077 kfree(adapter->uc_list);
4078 adapter->uc_list = NULL;
4079}
4080
/* Tear down everything be_setup() created: stop deferred work, clear VFs,
 * hand PF-pool resources back to an even FW distribution (Skyhawk PF only,
 * and only when no VFs are currently assigned), undo VxLAN offloads,
 * destroy the iface and queues, and disable MSI-x.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	/* Let any work already queued on the driver workqueue drain */
	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
4116
/* Create an iface on behalf of (proxy for) each VF. Capability flags come
 * from the VF's FW profile when one exists (non-BE3 chips); otherwise the
 * default BE_VF_IF_EN_FLAGS set is used.
 * Returns 0 on success or the first failing FW cmd status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			/* Query this VF's profile; on failure cap_flags
			 * keeps its previous value.
			 */
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4152
Sathya Perla39f1d942012-05-08 19:41:24 +00004153static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004154{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004155 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004156 int vf;
4157
Sathya Perla39f1d942012-05-08 19:41:24 +00004158 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4159 GFP_KERNEL);
4160 if (!adapter->vf_cfg)
4161 return -ENOMEM;
4162
Sathya Perla11ac75e2011-12-13 00:58:50 +00004163 for_all_vfs(adapter, vf_cfg, vf) {
4164 vf_cfg->if_handle = -1;
4165 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004166 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004167 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004168}
4169
/* Bring up SRIOV: allocate per-VF state, create (or re-discover) each VF's
 * iface and MAC, grant filter-management privilege, read spoof-check state,
 * enable the VFs in FW and in the PCI layer, and (on BE3) turn on VEB
 * forwarding. If VFs were left enabled by a previous driver load
 * (old_vfs != 0), existing iface/MAC state is queried instead of recreated.
 * On any failure the partial setup is undone via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: look up their iface handles and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the current spoof-check setting; ignored on error */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4262
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304263/* Converting function_mode bits on BE3 to SH mc_type enums */
4264
4265static u8 be_convert_mc_type(u32 function_mode)
4266{
Suresh Reddy66064db2014-06-23 16:41:29 +05304267 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304268 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304269 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304270 return FLEX10;
4271 else if (function_mode & VNIC_MODE)
4272 return vNIC2;
4273 else if (function_mode & UMC_ENABLED)
4274 return UMC;
4275 else
4276 return MC_NONE;
4277}
4278
/* On BE2/BE3 FW does not suggest the supported limits, so derive the
 * per-function resource limits (@res) in the driver from the chip type,
 * multi-channel mode, SRIOV state and function capability bits.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* One extra RX queue beyond the RSS set (the default/non-RSS queue) */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx chips do not support a FW default RSS queue */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4350
Sathya Perla30128032011-11-10 19:17:57 +00004351static void be_setup_init(struct be_adapter *adapter)
4352{
4353 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004354 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004355 adapter->if_handle = -1;
4356 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004357 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304358 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004359 if (be_physfn(adapter))
4360 adapter->cmd_privileges = MAX_PRIVILEGES;
4361 else
4362 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004363}
4364
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * no: of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	/* Query port-wide limits from the saved profile */
	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4389
/* Read the SRIOV resource limits (PF-pool) from FW into adapter->pool_res
 * and, when VFs survived a previous driver unload, adopt the PCI-reported
 * TotalVFs/num_vfs values instead of the pool limits.
 * Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	/* On Skyhawk, derive the per-PF-pool RSS table share (see
	 * be_calculate_pf_pool_rss_tables()) when VFs can still be created.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4428
/* Discover the SRIOV configuration and, on Skyhawk with no pre-existing
 * VFs, ask FW to assign the whole PF-pool to the PF (num_vfs = 0) until
 * VFs are actually created. Failure to optimize is logged but not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the supported VF count to the PCI layer */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4454
/* Populate adapter->res with the per-function resource limits (derived in
 * the driver for BEx chips, queried from FW otherwise), reserve EQs for
 * RoCE, decide whether a separate non-RSS default RXQ is needed, and size
 * the initial RX/TX interrupt counts.
 * Returns 0 on success or the FW cmd status on failure.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
			min_t(u16, netif_get_num_default_rss_queues(),
			      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4504
/* Query controller attributes and FW configuration, cache the FAT dump
 * length, message level (BEx only), WoL capability and port name, and log
 * the active profile on the PF.
 * Returns 0 on success or the first failing mandatory FW cmd status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	/* Reflect the WoL setting in the PCI wake state */
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4542
Sathya Perla95046b92013-07-23 15:25:02 +05304543static int be_mac_setup(struct be_adapter *adapter)
4544{
4545 u8 mac[ETH_ALEN];
4546 int status;
4547
4548 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4549 status = be_cmd_get_perm_mac(adapter, mac);
4550 if (status)
4551 return status;
4552
4553 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4554 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304555 }
4556
Sathya Perla95046b92013-07-23 15:25:02 +05304557 return 0;
4558}
4559
/* Queue the adapter's periodic work (1s delay) on be_wq and flag it as
 * scheduled so be_cancel_worker() knows there is work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4565
/* Flush and destroy the module-global error-recovery workqueue, if it was
 * created; NULL it so subsequent schedule/destroy calls become no-ops.
 */
static void be_destroy_err_recovery_workq(void)
{
	if (!be_err_recovery_workq)
		return;

	flush_workqueue(be_err_recovery_workq);
	destroy_workqueue(be_err_recovery_workq);
	be_err_recovery_workq = NULL;
}
4575
/* Schedule the error-detection work after @delay msecs on the dedicated
 * recovery workqueue and mark it pending. Silently does nothing when the
 * workqueue was never created.
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;

	if (!be_err_recovery_workq)
		return;

	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
			   msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4587
/* Create the event, TX, RX-completion and MCC queues, then register the
 * actual RX/TX queue counts with the network stack.
 * Returns 0 on success; on any failure logs once and returns the status
 * (queues created so far are left for the caller's cleanup path).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4622
Ajit Khaparde62219062016-02-10 22:45:53 +05304623static int be_if_create(struct be_adapter *adapter)
4624{
4625 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4626 u32 cap_flags = be_if_cap_flags(adapter);
4627 int status;
4628
Sathya Perlab7172412016-07-27 05:26:18 -04004629 /* alloc required memory for other filtering fields */
4630 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4631 sizeof(*adapter->pmac_id), GFP_KERNEL);
4632 if (!adapter->pmac_id)
4633 return -ENOMEM;
4634
4635 adapter->mc_list = kcalloc(be_max_mc(adapter),
4636 sizeof(*adapter->mc_list), GFP_KERNEL);
4637 if (!adapter->mc_list)
4638 return -ENOMEM;
4639
4640 adapter->uc_list = kcalloc(be_max_uc(adapter),
4641 sizeof(*adapter->uc_list), GFP_KERNEL);
4642 if (!adapter->uc_list)
4643 return -ENOMEM;
4644
Sathya Perlae2617682016-06-22 08:54:54 -04004645 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304646 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4647
4648 en_flags &= cap_flags;
4649 /* will enable all the needed filter flags in be_open() */
4650 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4651 &adapter->if_handle, 0);
4652
Sathya Perlab7172412016-07-27 05:26:18 -04004653 if (status)
4654 return status;
4655
4656 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304657}
4658
/* Re-create the iface and all queues with the current configuration:
 * close the netdev if running, stop the worker, tear down queues and the
 * iface, re-enable MSI-x if needed, rebuild the iface and queues, restart
 * the worker, and re-open the netdev if it was running.
 * Returns 0 on success or the first failing step's status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4701
/* Parse the leading major version number from a FW version string such as
 * "10.2.1". Returns 0 when the string does not start with an integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4712
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304713/* If it is error recovery, FLR the PF
4714 * Else if any VFs are already enabled don't FLR the PF
4715 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004716static bool be_reset_required(struct be_adapter *adapter)
4717{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304718 if (be_error_recovering(adapter))
4719 return true;
4720 else
4721 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004722}
4723
/* Wait for the FW to be ready and perform the required initialization:
 * clear driver-recorded error state, FLR the function when required (see
 * be_reset_required()), issue FW_INIT and re-enable interrupts for other
 * ULPs sharing the NIC function.
 * Returns 0 on success or the failing step's status.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4755
/* Bring the adapter from reset to a fully configured state: FW init,
 * resource discovery, MSI-x, interface and queue creation, MAC/flow
 * control setup and (optionally) VF provisioning. On any failure after
 * be_get_config() everything is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	/* Carve out SR-IOV resources before querying what we got */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn on pre-4.0 firmware on BE2: interrupts may not be
	 * delivered reliably
	 */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If setting flow control fails, read back what the FW applied */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	if (be_physfn(adapter) && !lancer_chip(adapter))
		be_cmd_set_features(adapter);

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4860
Ivan Vecera66268732011-12-08 01:31:21 +00004861#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: notify each event queue and schedule its NAPI
 * context so pending events are processed without normal interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
4873#endif
4874
/* Flash a firmware image file onto the adapter. The interface must be
 * up so FW cmds can be issued. On success the cached FW version string
 * is refreshed. Returns 0 on success or a negative errno.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	/* On failure request_firmware() leaves fw == NULL, so the
	 * release_firmware() at fw_exit is a safe no-op
	 */
	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* Lancer uses a different flashing protocol than BEx/Skyhawk */
	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
4904
/* ndo_bridge_setlink: program the e-switch forwarding mode (VEB/VEPA)
 * from an IFLA_AF_SPEC netlink request. Only supported with SR-IOV
 * enabled; BE3 does not support VEPA.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	/* Only the first IFLA_BRIDGE_MODE attribute is acted upon; the
	 * function returns from inside the loop once it is processed.
	 */
	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
	/* NOTE(review): if no IFLA_BRIDGE_MODE attribute is present the
	 * loop falls through to err: and logs a failure while still
	 * returning status == 0 — confirm this is intended.
	 */
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4954
/* ndo_bridge_getlink: report the current e-switch mode (VEB/VEPA) via
 * the default bridge-netlink fill helper. Returns 0 without filling
 * anything when no mode is applicable (no SR-IOV on BEx/Lancer, FW
 * query failure, or PASSTHRU mode).
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: ask the FW for the current hsw config */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
4985
Sathya Perlab7172412016-07-27 05:26:18 -04004986static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4987 void (*func)(struct work_struct *))
4988{
4989 struct be_cmd_work *work;
4990
4991 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4992 if (!work) {
4993 dev_err(&adapter->pdev->dev,
4994 "be_work memory allocation failed\n");
4995 return NULL;
4996 }
4997
4998 INIT_WORK(&work->work, func);
4999 work->adapter = adapter;
5000 return work;
5001}
5002
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_work_add_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	struct net_device *netdev = adapter->netdev;
	struct device *dev = &adapter->pdev->dev;
	__be16 port = cmd_work->info.vxlan_port;
	int status;

	/* Re-add of the already-offloaded port: just count the alias */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		goto done;
	}

	/* A different port while offloads are active: only one UDP port
	 * can be offloaded, so turn offloads off entirely (but keep
	 * counting ports so they can be re-enabled when all are gone)
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		goto done;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities now that a port is set */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	goto done;
err:
	be_disable_vxlan_offloads(adapter);
done:
	kfree(cmd_work);
}
5072
/* Deferred handler for ndo_udp_tunnel_del: drop one reference to the
 * offloaded VxLAN port and disable offloads when the last non-alias
 * reference goes away. Ports that were never the offloaded one only
 * decrement vxlan_port_count.
 */
static void be_work_del_vxlan_port(struct work_struct *work)
{
	struct be_cmd_work *cmd_work =
				container_of(work, struct be_cmd_work, work);
	struct be_adapter *adapter = cmd_work->adapter;
	__be16 port = cmd_work->info.vxlan_port;

	if (adapter->vxlan_port != port)
		goto done;

	/* The offloaded port was added more than once: just drop an alias */
	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		goto out;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
out:
	kfree(cmd_work);
}
5098
5099static void be_cfg_vxlan_port(struct net_device *netdev,
5100 struct udp_tunnel_info *ti,
5101 void (*func)(struct work_struct *))
5102{
5103 struct be_adapter *adapter = netdev_priv(netdev);
5104 struct be_cmd_work *cmd_work;
5105
5106 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5107 return;
5108
5109 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5110 return;
5111
5112 cmd_work = be_alloc_work(adapter, func);
5113 if (cmd_work) {
5114 cmd_work->info.vxlan_port = ti->port;
5115 queue_work(be_wq, &cmd_work->work);
5116 }
5117}
5118
/* ndo_udp_tunnel_del hook: schedule removal of a VxLAN port offload */
static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}
5124
/* ndo_udp_tunnel_add hook: schedule addition of a VxLAN port offload */
static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
Joe Stringer725d5482014-11-13 16:38:13 -08005130
/* ndo_features_check: per-packet feature mask adjustment. Tunnel
 * offloads are advertised only for VxLAN, so strip checksum/GSO
 * offload bits from any encapsulated packet that is not exactly the
 * configured VxLAN encapsulation.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* Keep offloads only for: UDP transport, inner Ethernet frame
	 * (TEB), exactly a udphdr+vxlanhdr between transport and inner
	 * MAC headers, and the configured VxLAN destination port.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
	    !adapter->vxlan_port ||
	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac9c47142014-03-27 10:46:19 +05305173
/* ndo_get_phys_port_id: build a physical port id from the 1-based HBA
 * port number followed by the controller serial number words copied in
 * reverse word order. Returns -ENOSPC if the id would not fit.
 */
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	/* +1 for the leading port-number byte */
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	/* Copy serial number words highest-index first */
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}
5194
Sathya Perlab7172412016-07-27 05:26:18 -04005195static void be_set_rx_mode(struct net_device *dev)
5196{
5197 struct be_adapter *adapter = netdev_priv(dev);
5198 struct be_cmd_work *work;
5199
5200 work = be_alloc_work(adapter, be_work_set_rx_mode);
5201 if (work)
5202 queue_work(be_wq, &work->work);
5203}
5204
/* Driver entry points registered with the networking core */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
	.ndo_udp_tunnel_add = be_add_vxlan_port,
	.ndo_udp_tunnel_del = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
5234
/* One-time netdev initialization: advertise offload features, set
 * flags, GSO limit, MTU bounds and attach the ops/ethtool tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing only when the interface supports RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}
5265
/* Quiesce the device for recovery/suspend: detach it from the stack,
 * close it if running (both under rtnl_lock, as be_close requires),
 * then release all adapter resources via be_clear().
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5278
/* Counterpart of be_cleanup(): re-run full adapter setup, re-open the
 * netdev if it was running, and re-attach it to the stack. Returns 0
 * on success or the failing step's status.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	/* be_open() needs rtnl_lock (it updates queue state) */
	rtnl_lock();
	if (netif_running(netdev))
		status = be_open(netdev);
	rtnl_unlock();

	if (status)
		return status;

	netif_device_attach(netdev);

	return 0;
}
5300
/* Initiate a chip soft reset by setting the SR bit in the SLIPORT
 * soft-reset register (read-modify-write over PCI config MMIO).
 */
static void be_soft_reset(struct be_adapter *adapter)
{
	u32 val;

	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
	val |= SLIPORT_SOFTRESET_SR_MASK;
	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
}
5310
/* Check whether the HW error currently reported in the POST stage
 * register meets the recovery criteria: it must carry a recoverable
 * error code, the driver must have been loaded for at least the idle
 * time, the previous recovery must be older than the recovery
 * interval, and the error code must differ from the last one (no
 * consecutive identical TPE errors). On success the recovery
 * timestamp/code are recorded for the next check.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5356
/* One step of the TPE (recoverable HW error) recovery state machine:
 * NONE -> DETECT -> [RESET (PF0 only)] -> PRE_POLL -> REINIT.
 * Returns -EAGAIN while the sequence is in progress (caller reschedules
 * after err_rec->resched_delay), 0 once the PRE_POLL step completes and
 * reinitialization may proceed, or -EINVAL/-EIO on an unrecoverable
 * condition (resched_delay is then cleared).
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* Non-PF0 functions skip the reset step and just wait for
		 * the poll time to elapse
		 */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					 ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;		/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5427
/* Attempt full adapter recovery after a HW error. On BEx/Skyhawk the
 * TPE state machine must complete first (it may return -EAGAIN to ask
 * for a later retry); Lancer goes straight to re-init. Then the FW is
 * awaited, the device torn down and brought back up via
 * be_cleanup()/be_resume(). Returns 0 on successful recovery.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	/* BEx/SH: TPE recovery must be supported and not disabled */
	if (!lancer_chip(adapter)) {
		if (!adapter->error_recovery.recovery_supported ||
		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
			return -EIO;
		status = be_tpe_recover(adapter);
		if (status)
			goto err;
	}

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	adapter->flags |= BE_FLAGS_TRY_RECOVERY;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;

err:
	return status;
}
5461
/* Delayed-work handler that polls for adapter HW errors and drives the
 * recovery state machine. Reschedules itself with a delay that depends on
 * the outcome: the default detection interval, a chip-specific state-machine
 * delay (BEx/Skyhawk), or the longer retry delay (Lancer). On permanent
 * failure it does NOT reschedule, so error detection stops until reload.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	/* No HW error latched: nothing to recover, just poll again later */
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		/* Full recovery: reset retry bookkeeping before re-arming */
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check if PF have allocated resources
		 * every second.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		/* Retries exhausted (Lancer) or unrecoverable: give up */
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}
5517
Vasundhara Volam21252372015-02-06 08:18:42 -05005518static void be_log_sfp_info(struct be_adapter *adapter)
5519{
5520 int status;
5521
5522 status = be_cmd_query_sfp_info(adapter);
5523 if (!status) {
5524 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305525 "Port %c: %s Vendor: %s part no: %s",
5526 adapter->port_name,
5527 be_misconfig_evt_port_state[adapter->phy_state],
5528 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005529 adapter->phy.vendor_pn);
5530 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305531 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005532}
5533
/* Periodic (1 s) housekeeping work item: samples die temperature on the PF,
 * reaps MCC completions while the interface is down, kicks off async stats
 * collection, replenishes starved RX queues, and updates EQ delays.
 * Always requeues itself on be_wq.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* be_get_temp_freq must be a power of 2 (see be_drv_init) for the
	 * MODULO macro used here.
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Issue a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	/* Flag is set by the async event handler; logging clears it */
	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5582
Sathya Perla78fad34e2015-02-23 04:20:08 -05005583static void be_unmap_pci_bars(struct be_adapter *adapter)
5584{
5585 if (adapter->csr)
5586 pci_iounmap(adapter->pdev, adapter->csr);
5587 if (adapter->db)
5588 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005589 if (adapter->pcicfg && adapter->pcicfg_mapped)
5590 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005591}
5592
/* Return the PCI BAR number holding the doorbell registers:
 * BAR 0 on Lancer chips and on virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5600
5601static int be_roce_map_pci_bars(struct be_adapter *adapter)
5602{
5603 if (skyhawk_chip(adapter)) {
5604 adapter->roce_db.size = 4096;
5605 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5606 db_bar(adapter));
5607 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5608 db_bar(adapter));
5609 }
5610 return 0;
5611}
5612
5613static int be_map_pci_bars(struct be_adapter *adapter)
5614{
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005615 struct pci_dev *pdev = adapter->pdev;
Sathya Perla78fad34e2015-02-23 04:20:08 -05005616 u8 __iomem *addr;
5617 u32 sli_intf;
5618
5619 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5620 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5621 SLI_INTF_FAMILY_SHIFT;
5622 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5623
5624 if (BEx_chip(adapter) && be_physfn(adapter)) {
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005625 adapter->csr = pci_iomap(pdev, 2, 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005626 if (!adapter->csr)
5627 return -ENOMEM;
5628 }
5629
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005630 addr = pci_iomap(pdev, db_bar(adapter), 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005631 if (!addr)
5632 goto pci_map_err;
5633 adapter->db = addr;
5634
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005635 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5636 if (be_physfn(adapter)) {
5637 /* PCICFG is the 2nd BAR in BE2 */
5638 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5639 if (!addr)
5640 goto pci_map_err;
5641 adapter->pcicfg = addr;
Douglas Millera69bf3c2016-03-04 15:36:56 -06005642 adapter->pcicfg_mapped = true;
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005643 } else {
5644 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Douglas Millera69bf3c2016-03-04 15:36:56 -06005645 adapter->pcicfg_mapped = false;
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005646 }
5647 }
5648
Sathya Perla78fad34e2015-02-23 04:20:08 -05005649 be_roce_map_pci_bars(adapter);
5650 return 0;
5651
5652pci_map_err:
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005653 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla78fad34e2015-02-23 04:20:08 -05005654 be_unmap_pci_bars(adapter);
5655 return -ENOMEM;
5656}
5657
5658static void be_drv_cleanup(struct be_adapter *adapter)
5659{
5660 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5661 struct device *dev = &adapter->pdev->dev;
5662
5663 if (mem->va)
5664 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5665
5666 mem = &adapter->rx_filter;
5667 if (mem->va)
5668 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5669
5670 mem = &adapter->stats_cmd;
5671 if (mem->va)
5672 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5673}
5674
/* Allocate and initialize various fields in be_adapter struct:
 * DMA-coherent buffers for the FW mailbox, RX-filter and stats commands,
 * the locks protecting mailbox/MCC/RX-filter access, and the two delayed
 * work items (housekeeping worker and error-detection task).
 * Returns 0 on success or -ENOMEM, unwinding partial allocations via the
 * goto ladder at the bottom. Paired with be_drv_cleanup().
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so a 16-byte-aligned view can be
	 * carved out below (mbox_mem_align).
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned alias into mbox_mem_alloced; it is never
	 * freed separately.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5749
/* PCI remove callback: tears down the adapter in the reverse order of
 * be_probe(). The sequence matters: RoCE and interrupts first, then the
 * error-detection work, netdev unregistration, resource teardown, and
 * finally FW/PCI cleanup.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* Skip the function reset while VFs are still assigned to guests */
	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5782
Arnd Bergmann9a032592015-05-18 23:06:45 +02005783static ssize_t be_hwmon_show_temp(struct device *dev,
5784 struct device_attribute *dev_attr,
5785 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305786{
5787 struct be_adapter *adapter = dev_get_drvdata(dev);
5788
5789 /* Unit: millidegree Celsius */
5790 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5791 return -EIO;
5792 else
5793 return sprintf(buf, "%u\n",
5794 adapter->hwmon_info.be_on_die_temp * 1000);
5795}
5796
/* Read-only hwmon attribute exposing the die temperature sampled by
 * be_worker() (see be_hwmon_show_temp).
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

/* Attribute list registered with the hwmon core in be_probe() */
static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5806
Sathya Perlad3791422012-09-28 04:39:44 +00005807static char *mc_name(struct be_adapter *adapter)
5808{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305809 char *str = ""; /* default */
5810
5811 switch (adapter->mc_type) {
5812 case UMC:
5813 str = "UMC";
5814 break;
5815 case FLEX10:
5816 str = "FLEX10";
5817 break;
5818 case vNIC1:
5819 str = "vNIC-1";
5820 break;
5821 case nPAR:
5822 str = "nPAR";
5823 break;
5824 case UFP:
5825 str = "UFP";
5826 break;
5827 case vNIC2:
5828 str = "vNIC-2";
5829 break;
5830 default:
5831 str = "";
5832 }
5833
5834 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005835}
5836
/* "PF" for a physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5841
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005842static inline char *nic_name(struct pci_dev *pdev)
5843{
5844 switch (pdev->device) {
5845 case OC_DEVICE_ID1:
5846 return OC_NAME;
5847 case OC_DEVICE_ID2:
5848 return OC_NAME_BE;
5849 case OC_DEVICE_ID3:
5850 case OC_DEVICE_ID4:
5851 return OC_NAME_LANCER;
5852 case BE_DEVICE_ID2:
5853 return BE3_NAME;
5854 case OC_DEVICE_ID5:
5855 case OC_DEVICE_ID6:
5856 return OC_NAME_SH;
5857 default:
5858 return BE_NAME;
5859 }
5860}
5861
/* PCI probe callback: brings up a newly discovered adapter.
 * Sequence: enable PCI device and claim regions, allocate the netdev
 * (which embeds be_adapter), configure DMA masks, map BARs, allocate
 * driver resources (be_drv_init), bring up the HW (be_setup), register
 * the netdev, then start RoCE, error detection, and hwmon.
 * Failures unwind through the goto ladder in reverse order of setup.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* Best-effort: AER support is optional */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5958
/* Legacy PM suspend callback: quiesce the adapter (interrupts off, error
 * detection cancelled, queues destroyed via be_cleanup) before saving PCI
 * state and powering the device down. Paired with be_pci_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5973
/* Legacy PM resume callback: re-enable the PCI device, restore its saved
 * config space, bring the adapter back up (be_resume) and re-arm the
 * error-detection work. Returns 0 or a negative errno from the failing
 * step.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
5993
/*
 * An FLR will stop BE from DMAing any data.
 *
 * Shutdown callback: stop RoCE, flush/cancel the periodic worker and
 * error-detection work, detach the netdev, then issue a function-level
 * reset so the HW stops all DMA before the system powers off/reboots.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
6014
/* EEH/AER error_detected callback: quiesce the adapter on the first
 * notification (the BE_ERROR_EEH flag guards against repeated cleanup),
 * then tell the PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	/* Link is permanently dead: no point requesting a reset */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
6048
/* EEH/AER slot_reset callback: re-enable the device after the slot reset,
 * restore config space, and wait for FW readiness before declaring the
 * device recovered. Any failure here disconnects the device.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Clear all error flags (incl. BE_ERROR_EEH set in err_detected) */
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6074
/* EEH/AER resume callback: final stage of error recovery. Re-save the
 * (just restored) PCI state, bring the adapter back up, re-attach RoCE
 * and re-arm error detection. On failure only a log message is emitted;
 * the callback has no way to report an error to the PCI core.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6095
/* sysfs sriov_numvfs handler: enable num_vfs virtual functions, or
 * disable them all when num_vfs is 0. On success returns the number of
 * VFs now enabled (or 0 after disabling); on failure returns a negative
 * errno translated via be_cmd_status().
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* Refuse to disable VFs that guests still hold a reference to */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		/* Non-fatal: continue with the default distribution */
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6150
/* AER/EEH recovery callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6156
/* PCI driver descriptor registered in be_init_module() */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6168
6169static int __init be_init_module(void)
6170{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306171 int status;
6172
Joe Perches8e95a202009-12-03 07:58:21 +00006173 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6174 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006175 printk(KERN_WARNING DRV_NAME
6176 " : Module param rx_frag_size must be 2048/4096/8192."
6177 " Using 2048\n");
6178 rx_frag_size = 2048;
6179 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006180
Vasundhara Volamace40af2015-03-04 00:44:34 -05006181 if (num_vfs > 0) {
6182 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6183 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6184 }
6185
Sathya Perlab7172412016-07-27 05:26:18 -04006186 be_wq = create_singlethread_workqueue("be_wq");
6187 if (!be_wq) {
6188 pr_warn(DRV_NAME "workqueue creation failed\n");
6189 return -1;
6190 }
6191
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306192 be_err_recovery_workq =
6193 create_singlethread_workqueue("be_err_recover");
6194 if (!be_err_recovery_workq)
6195 pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6196
6197 status = pci_register_driver(&be_driver);
6198 if (status) {
6199 destroy_workqueue(be_wq);
6200 be_destroy_err_recovery_workq();
6201 }
6202 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006203}
6204module_init(be_init_module);
6205
/* Module exit point: unregister the PCI driver first (so no new work can
 * be queued), then destroy the driver-wide workqueues created in
 * be_init_module().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	/* defensive: be_wq is always created if module init succeeded */
	if (be_wq)
		destroy_workqueue(be_wq);
}
6215module_exit(be_exit_module);