/*
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>
#include <net/vxlan.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* Per-module error detection/recovery workq shared across all functions.
 * Each function schedules its own work request on this shared workq.
 */
static struct workqueue_struct *be_err_recovery_workq;

static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* Workqueue used by all functions for deferring cmd calls to the adapter */
static struct workqueue_struct *be_wq;

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};

#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)

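/* Frees the DMA-coherent memory backing a queue and clears its VA pointer */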
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;

	if (mem->va) {
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
		mem->va = NULL;
	}
}

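/* Allocates zeroed DMA-coherent memory for a queue of 'len' entries of
 * 'entry_size' bytes each.
 */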
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
			  u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				      GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;
	return 0;
}

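/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR interrupt-control register via PCI config space.
 */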
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}

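/* Ring the RX queue doorbell to tell HW how many buffers were posted */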
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

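/* Programs 'mac' as the primary MAC, reusing an existing pmac_id when the
 * address is already present in the uc-list.
 */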
static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}

static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set their
	 * MAC address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {
		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else if (BE3_chip(adapter)) {
		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	} else {
		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

		return &cmd->hw_stats;
	}
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
	if (BE2_chip(adapter)) {
		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else if (BE3_chip(adapter)) {
		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	} else {
		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

		return &hw_stats->erx;
	}
}

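/* Parse the v0 (BE2) HW stats into the driver's generic stats struct */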
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}

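/* Accumulate a wrapping 16-bit HW counter into a 32-bit SW counter */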
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)			(x & 0xFFFF)
#define hi(x)			(x & 0xFFFF0000)
	bool wrapped = val < lo(*acc);
	u32 newacc = hi(*acc) + val;

	if (wrapped)
		newacc += 65536;
	ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
			       struct be_rx_obj *rxo, u32 erx_stat)
{
	if (!BEx_chip(adapter))
		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
	else
		/* below erx HW counter can actually wrap around after
		 * 65535. Driver accumulates a 32-bit value
		 */
		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
				     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}

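/* ndo_get_stats64 handler: aggregates per-queue SW counters (read under
 * their u64_stats sync points) and HW error counters into the netdev stats.
 */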
static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
		netif_carrier_off(netdev);
		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
	}

	if (link_status)
		netif_carrier_on(netdev);
	else
		netif_carrier_off(netdev);

	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
}

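/* Length of the headers (up to and including TCP) that are replicated in
 * each segment of a TSO packet; uses the inner headers for tunnelled pkts.
 */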
static int be_gso_hdr_len(struct sk_buff *skb)
{
	if (skb->encapsulation)
		return skb_inner_transport_offset(skb) +
		       inner_tcp_hdrlen(skb);
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
	/* Account for headers which get duplicated in TSO pkt */
	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len + dup_hdr_len;
	stats->tx_pkts += tx_pkts;
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}

/* Returns number of WRBs needed for the skb */
static u32 skb_wrb_cnt(struct sk_buff *skb)
{
	/* +1 for the header wrb */
	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
}

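/* Fill a WRB (work request block) with a fragment's DMA address and length */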
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}

/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
				     struct sk_buff *skb)
{
	u8 vlan_prio;
	u16 vlan_tag;

	vlan_tag = skb_vlan_tag_get(skb);
	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* If vlan priority provided by OS is NOT in available bmap */
	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
			   adapter->recommended_prio_bits;

	return vlan_tag;
}

/* Used only for IP tunnel packets */
static u16 skb_inner_ip_proto(struct sk_buff *skb)
{
	return (inner_ip_hdr(skb)->version == 4) ?
		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
}

static u16 skb_ip_proto(struct sk_buff *skb)
{
	return (ip_hdr(skb)->version == 4) ?
		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
}

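/* The TXQ is considered full once a max-fragment packet can no longer fit */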
static inline bool be_is_txq_full(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
}

static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}

static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
}

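/* Derive the WRB offload flags (LSO, csum, VLAN) and fields from the skb */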
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}

static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
			  bool unmap_single)
{
	dma_addr_t dma;
	u32 frag_len = le32_to_cpu(wrb->frag_len);

	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
		(u64)le32_to_cpu(wrb->frag_pa_lo);
	if (frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
	}
}

/* Grab a WRB header for xmit */
static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
{
	u32 head = txo->q.head;

	queue_head_inc(&txo->q);
	return head;
}

/* Set up the WRB header for xmit */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}

/* Setup a WRB fragment (buffer descriptor) for xmit */
static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
				 int len)
{
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	wrb = queue_head_node(txq);
	wrb_fill(wrb, busaddr, len);
	queue_head_inc(txq);
}

/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	txq->head = head;
}

/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet.
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u32 head = txq->head;
	dma_addr_t busaddr;
	int len;

	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		len = skb_frag_size(frag);
		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}

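/* Returns nonzero once the FW has reported QnQ mode via an async event */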
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

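/* Apply SW workarounds for the padded-pkt and VLAN-tagging HW bugs below;
 * returns NULL if the skb had to be dropped.
 */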
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301116static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1117 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301118 struct be_wrb_params
1119 *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001120{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001121 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001122 unsigned int eth_hdr_len;
1123 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +00001124
Ajit Khaparde1297f9d2013-04-24 11:52:28 +00001125 /* For padded packets, BE HW modifies tot_len field in IP header
1126 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001127 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001128 */
Sathya Perlaee9c7992013-05-22 23:04:55 +00001129 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1130 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +00001131 if (skb->len <= 60 &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001132 (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +00001133 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +00001134 ip = (struct iphdr *)ip_hdr(skb);
1135 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1136 }
1137
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001138 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301139 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001140 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +05301141 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001142 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301143 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +00001144
Somnath Kotur93040ae2012-06-26 22:32:10 +00001145 /* HW has a bug wherein it will calculate CSUM for VLAN
1146	 * pkts even though checksum offload is disabled.
1147	 * Manually insert the VLAN tag in such pkts.
1148 */
1149 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001150 skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301151 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001152 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301153 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001154 }
1155
1156	/* HW may lock up when VLAN HW tagging is requested on
1157 * certain ipv6 packets. Drop such pkts if the HW workaround to
1158 * skip HW tagging is not enabled by FW.
1159 */
1160 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +05301161 (adapter->pvid || adapter->qnq_vid) &&
1162 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001163 goto tx_drop;
1164
1165	/* Insert the VLAN tag manually to prevent an ASIC lockup
1166	 * that occurs when the ASIC itself inserts a VLAN tag into
1167	 * certain ipv6 packets. Insert VLAN tags in the driver,
1168	 * and set the event, completion and vlan bits accordingly
1169	 * in the Tx WRB.
1170 */
1171 if (be_ipv6_tx_stall_chk(adapter, skb) &&
1172 be_vlan_tag_tx_chk(adapter, skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301173 skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001174 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301175 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001176 }
1177
Sathya Perlaee9c7992013-05-22 23:04:55 +00001178 return skb;
1179tx_drop:
1180 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301181err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001182 return NULL;
1183}
1184
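/* Chip-independent entry point for the TX workarounds: pads runt pkts
 * (<= 32 bytes) that can stall the port, runs the BEx/Lancer specific
 * fixups, and trims anything beyond BE_MAX_GSO_SIZE, which the HW
 * cannot handle.
 */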
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301185static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1186 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301187 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301188{
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301189 int err;
1190
Suresh Reddy8227e992015-10-12 03:47:19 -04001191 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1192	 * packets that are 32 bytes or less may cause a transmit stall
1193	 * on that port. The workaround is to pad such packets
1194	 * (len <= 32 bytes) to a minimum length of 36 bytes.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301195 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001196 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001197 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301198 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301199 }
1200
1201 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301202 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301203 if (!skb)
1204 return NULL;
1205 }
1206
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05301207 /* The stack can send us skbs with length greater than
1208 * what the HW can handle. Trim the extra bytes.
1209 */
1210 WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1211 err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1212 WARN_ON(err);
1213
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301214 return skb;
1215}
1216
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001217static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1218{
1219 struct be_queue_info *txq = &txo->q;
1220 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1221
1222 /* Mark the last request eventable if it hasn't been marked already */
1223 if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1224 hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1225
1226	/* compose a dummy wrb if there is an odd number of wrbs to notify */
1227 if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
Sathya Perlaf986afc2015-02-06 08:18:43 -05001228 wrb_fill_dummy(queue_head_node(txq));
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001229 queue_head_inc(txq);
1230 atomic_inc(&txq->used);
1231 txo->pend_wrb_cnt++;
1232 hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1233 TX_HDR_WRB_NUM_SHIFT);
1234 hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1235 TX_HDR_WRB_NUM_SHIFT);
1236 }
1237 be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1238 txo->pend_wrb_cnt = 0;
1239}
1240
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301241/* OS2BMC related */
1242
1243#define DHCP_CLIENT_PORT 68
1244#define DHCP_SERVER_PORT 67
1245#define NET_BIOS_PORT1 137
1246#define NET_BIOS_PORT2 138
1247#define DHCPV6_RAS_PORT 547
1248
1249#define is_mc_allowed_on_bmc(adapter, eh) \
1250 (!is_multicast_filt_enabled(adapter) && \
1251 is_multicast_ether_addr(eh->h_dest) && \
1252 !is_broadcast_ether_addr(eh->h_dest))
1253
1254#define is_bc_allowed_on_bmc(adapter, eh) \
1255 (!is_broadcast_filt_enabled(adapter) && \
1256 is_broadcast_ether_addr(eh->h_dest))
1257
1258#define is_arp_allowed_on_bmc(adapter, skb) \
1259 (is_arp(skb) && is_arp_filt_enabled(adapter))
1260
1261#define is_broadcast_packet(eh, adapter) \
1262 (is_multicast_ether_addr(eh->h_dest) && \
1263 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))
1264
1265#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))
1266
1267#define is_arp_filt_enabled(adapter) \
1268 (adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1269
1270#define is_dhcp_client_filt_enabled(adapter) \
1271 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1272
1273#define is_dhcp_srvr_filt_enabled(adapter) \
1274 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1275
1276#define is_nbios_filt_enabled(adapter) \
1277 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1278
1279#define is_ipv6_na_filt_enabled(adapter) \
1280 (adapter->bmc_filt_mask & \
1281 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1282
1283#define is_ipv6_ra_filt_enabled(adapter) \
1284 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1285
1286#define is_ipv6_ras_filt_enabled(adapter) \
1287 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1288
1289#define is_broadcast_filt_enabled(adapter) \
1290 (adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1291
1292#define is_multicast_filt_enabled(adapter) \
1293 (adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1294
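/* Decides whether a copy of this TX pkt must also be sent to the BMC.
 * Only multicast/broadcast pkts are candidates, and each protocol
 * class (ARP, DHCP, NetBIOS, ipv6 ND/RA) is gated by the corresponding
 * bit in adapter->bmc_filt_mask. May re-point *skb when the vlan tag
 * has to be inlined for the BMC.
 */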
1295static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1296 struct sk_buff **skb)
1297{
1298 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1299 bool os2bmc = false;
1300
1301 if (!be_is_os2bmc_enabled(adapter))
1302 goto done;
1303
1304 if (!is_multicast_ether_addr(eh->h_dest))
1305 goto done;
1306
1307 if (is_mc_allowed_on_bmc(adapter, eh) ||
1308 is_bc_allowed_on_bmc(adapter, eh) ||
1309 is_arp_allowed_on_bmc(adapter, (*skb))) {
1310 os2bmc = true;
1311 goto done;
1312 }
1313
1314 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1315 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1316 u8 nexthdr = hdr->nexthdr;
1317
1318 if (nexthdr == IPPROTO_ICMPV6) {
1319 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1320
1321 switch (icmp6->icmp6_type) {
1322 case NDISC_ROUTER_ADVERTISEMENT:
1323 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1324 goto done;
1325 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1326 os2bmc = is_ipv6_na_filt_enabled(adapter);
1327 goto done;
1328 default:
1329 break;
1330 }
1331 }
1332 }
1333
1334 if (is_udp_pkt((*skb))) {
1335 struct udphdr *udp = udp_hdr((*skb));
1336
Venkat Duvvuru1645d992015-07-10 05:32:47 -04001337 switch (ntohs(udp->dest)) {
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301338 case DHCP_CLIENT_PORT:
1339 os2bmc = is_dhcp_client_filt_enabled(adapter);
1340 goto done;
1341 case DHCP_SERVER_PORT:
1342 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1343 goto done;
1344 case NET_BIOS_PORT1:
1345 case NET_BIOS_PORT2:
1346 os2bmc = is_nbios_filt_enabled(adapter);
1347 goto done;
1348 case DHCPV6_RAS_PORT:
1349 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1350 goto done;
1351 default:
1352 break;
1353 }
1354 }
1355done:
1356	/* For packets destined to the BMC over a vlan, the
1357	 * asic expects the vlan tag to be inline in the packet.
1358 */
1359 if (os2bmc)
1360 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1361
1362 return os2bmc;
1363}
1364
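/* ndo_start_xmit handler: applies the TX workarounds, posts the pkt's
 * WRBs (a second time with the mgmt bit set when a BMC copy is
 * needed), and defers notifying the HW via be_xmit_flush() while
 * xmit_more indicates more pkts are on the way.
 */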
Sathya Perlaee9c7992013-05-22 23:04:55 +00001365static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1366{
1367 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001368 u16 q_idx = skb_get_queue_mapping(skb);
1369 struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301370 struct be_wrb_params wrb_params = { 0 };
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301371 bool flush = !skb->xmit_more;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001372 u16 wrb_cnt;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001373
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301374 skb = be_xmit_workarounds(adapter, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001375 if (unlikely(!skb))
1376 goto drop;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001377
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301378 be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1379
1380 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001381 if (unlikely(!wrb_cnt)) {
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001382 dev_kfree_skb_any(skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001383 goto drop;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384 }
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001385
Venkata Duvvuru760c2952015-05-13 13:00:14 +05301386 /* if os2bmc is enabled and if the pkt is destined to bmc,
1387 * enqueue the pkt a 2nd time with mgmt bit set.
1388 */
1389 if (be_send_pkt_to_bmc(adapter, &skb)) {
1390 BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1391 wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1392 if (unlikely(!wrb_cnt))
1393 goto drop;
1394 else
1395 skb_get(skb);
1396 }
1397
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05301398 if (be_is_txq_full(txo)) {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05001399 netif_stop_subqueue(netdev, q_idx);
1400 tx_stats(txo)->tx_stops++;
1401 }
1402
1403 if (flush || __netif_subqueue_stopped(netdev, q_idx))
1404 be_xmit_flush(adapter, txo);
1405
1406 return NETDEV_TX_OK;
1407drop:
1408 tx_stats(txo)->tx_drv_drops++;
1409 /* Flush the already enqueued tx requests */
1410 if (flush && txo->pend_wrb_cnt)
1411 be_xmit_flush(adapter, txo);
1412
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001413 return NETDEV_TX_OK;
1414}
1415
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001416static inline bool be_in_all_promisc(struct be_adapter *adapter)
1417{
1418 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1419 BE_IF_FLAGS_ALL_PROMISCUOUS;
1420}
1421
1422static int be_set_vlan_promisc(struct be_adapter *adapter)
1423{
1424 struct device *dev = &adapter->pdev->dev;
1425 int status;
1426
1427 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1428 return 0;
1429
1430 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1431 if (!status) {
1432 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1433 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1434 } else {
1435 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1436 }
1437 return status;
1438}
1439
1440static int be_clear_vlan_promisc(struct be_adapter *adapter)
1441{
1442 struct device *dev = &adapter->pdev->dev;
1443 int status;
1444
1445 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1446 if (!status) {
1447 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1448 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1449 }
1450 return status;
1451}
1452
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001453/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001454 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1455 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456 */
Sathya Perla10329df2012-06-05 19:37:18 +00001457static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001458{
Vasundhara Volam50762662014-09-12 17:39:14 +05301459 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001460 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301461 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001462 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001463
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001464	/* No need to change the VLAN state if the I/F is in promiscuous mode */
1465 if (adapter->netdev->flags & IFF_PROMISC)
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001466 return 0;
1467
Sathya Perla92bf14a2013-08-27 16:57:32 +05301468 if (adapter->vlans_added > be_max_vlans(adapter))
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001469 return be_set_vlan_promisc(adapter);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001470
Somnath Kotur841f60f2016-07-27 05:26:15 -04001471 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1472 status = be_clear_vlan_promisc(adapter);
1473 if (status)
1474 return status;
1475 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001476 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301477 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1478 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001479
Vasundhara Volam435452a2015-03-20 06:28:23 -04001480 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001481 if (status) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001482 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001483 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP77be8c12015-05-06 05:30:35 -04001484 if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1485 addl_status(status) ==
Kalesh AP4c600052014-05-30 19:06:26 +05301486 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001487 return be_set_vlan_promisc(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001488 }
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001489 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001490}
1491
Patrick McHardy80d5c362013-04-19 02:04:28 +00001492static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493{
1494 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001495 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496
Sathya Perlab7172412016-07-27 05:26:18 -04001497 mutex_lock(&adapter->rx_filter_lock);
1498
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001499 /* Packets with VID 0 are always received by Lancer by default */
1500 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001501 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301502
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301503 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001504 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001505
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301506 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301507 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001508
Sathya Perlab7172412016-07-27 05:26:18 -04001509 status = be_vid_config(adapter);
1510done:
1511 mutex_unlock(&adapter->rx_filter_lock);
1512 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513}
1514
Patrick McHardy80d5c362013-04-19 02:04:28 +00001515static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516{
1517 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001518 int status = 0;
1519
1520 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001522 /* Packets with VID 0 are always received by Lancer by default */
1523 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001524 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001525
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301526 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001527 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301528
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301529 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301530 adapter->vlans_added--;
1531
Sathya Perlab7172412016-07-27 05:26:18 -04001532 status = be_vid_config(adapter);
1533done:
1534 mutex_unlock(&adapter->rx_filter_lock);
1535 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536}
1537
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001538static void be_set_all_promisc(struct be_adapter *adapter)
1539{
1540 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1541 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1542}
1543
1544static void be_set_mc_promisc(struct be_adapter *adapter)
1545{
1546 int status;
1547
1548 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1549 return;
1550
1551 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1552 if (!status)
1553 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1554}
1555
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001556static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001557{
1558 int status;
1559
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001560 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1561 return;
1562
1563 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001564 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001565 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1566}
1567
1568static void be_clear_uc_promisc(struct be_adapter *adapter)
1569{
1570 int status;
1571
1572 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1573 return;
1574
1575 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1576 if (!status)
1577 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1578}
1579
1580/* The below 2 functions are the callbacks for __dev_mc_sync/dev_uc_sync().
1581 * We use a single callback function for both sync and unsync. We don't really
1582 * add/remove addresses through this callback; we only use it to detect changes
1583 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1584 */
1585static int be_uc_list_update(struct net_device *netdev,
1586 const unsigned char *addr)
1587{
1588 struct be_adapter *adapter = netdev_priv(netdev);
1589
1590 adapter->update_uc_list = true;
1591 return 0;
1592}
1593
1594static int be_mc_list_update(struct net_device *netdev,
1595 const unsigned char *addr)
1596{
1597 struct be_adapter *adapter = netdev_priv(netdev);
1598
1599 adapter->update_mc_list = true;
1600 return 0;
1601}
1602
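/* Re-programs the HW multicast list: syncs with the netdev mc-list to
 * detect changes, caches the addresses in adapter->mc_list under the
 * addr lock, and falls back to mc-promisc when the count exceeds what
 * the interface supports.
 */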
1603static void be_set_mc_list(struct be_adapter *adapter)
1604{
1605 struct net_device *netdev = adapter->netdev;
Sathya Perlab7172412016-07-27 05:26:18 -04001606 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001607 bool mc_promisc = false;
1608 int status;
1609
Sathya Perlab7172412016-07-27 05:26:18 -04001610 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001611 __dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1612
1613 if (netdev->flags & IFF_PROMISC) {
1614 adapter->update_mc_list = false;
1615 } else if (netdev->flags & IFF_ALLMULTI ||
1616 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1617 /* Enable multicast promisc if num configured exceeds
1618 * what we support
1619 */
1620 mc_promisc = true;
1621 adapter->update_mc_list = false;
1622 } else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1623 /* Update mc-list unconditionally if the iface was previously
1624 * in mc-promisc mode and now is out of that mode.
1625 */
1626 adapter->update_mc_list = true;
1627 }
1628
Sathya Perlab7172412016-07-27 05:26:18 -04001629 if (adapter->update_mc_list) {
1630 int i = 0;
1631
1632 /* cache the mc-list in adapter */
1633 netdev_for_each_mc_addr(ha, netdev) {
1634 ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1635 i++;
1636 }
1637 adapter->mc_count = netdev_mc_count(netdev);
1638 }
1639 netif_addr_unlock_bh(netdev);
1640
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001641 if (mc_promisc) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001642 be_set_mc_promisc(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001643 } else if (adapter->update_mc_list) {
1644 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1645 if (!status)
1646 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1647 else
1648 be_set_mc_promisc(adapter);
1649
1650 adapter->update_mc_list = false;
1651 }
1652}
1653
1654static void be_clear_mc_list(struct be_adapter *adapter)
1655{
1656 struct net_device *netdev = adapter->netdev;
1657
1658 __dev_mc_unsync(netdev, NULL);
1659 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001660 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001661}
1662
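/* Avoid burning a HW MAC-table entry on a duplicate of the primary
 * MAC: if the uc-list entry equals dev_mac, alias its pmac_id to
 * pmac_id[0] instead of issuing another pmac-add cmd.
 */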
Suresh Reddy988d44b2016-09-07 19:57:52 +05301663static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1664{
Ivan Vecera1d0f1102017-01-06 20:30:02 +01001665 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301666 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1667 return 0;
1668 }
1669
Ivan Vecera1d0f1102017-01-06 20:30:02 +01001670 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
Suresh Reddy988d44b2016-09-07 19:57:52 +05301671 adapter->if_handle,
1672 &adapter->pmac_id[uc_idx + 1], 0);
1673}
1674
1675static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1676{
1677 if (pmac_id == adapter->pmac_id[0])
1678 return;
1679
1680 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1681}
1682
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001683static void be_set_uc_list(struct be_adapter *adapter)
1684{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001685 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001686 struct netdev_hw_addr *ha;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001687 bool uc_promisc = false;
Sathya Perlab7172412016-07-27 05:26:18 -04001688 int curr_uc_macs = 0, i;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001689
Sathya Perlab7172412016-07-27 05:26:18 -04001690 netif_addr_lock_bh(netdev);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001691 __dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001692
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001693 if (netdev->flags & IFF_PROMISC) {
1694 adapter->update_uc_list = false;
1695 } else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1696 uc_promisc = true;
1697 adapter->update_uc_list = false;
1698 } else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1699 /* Update uc-list unconditionally if the iface was previously
1700 * in uc-promisc mode and now is out of that mode.
1701 */
1702 adapter->update_uc_list = true;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001703 }
1704
Sathya Perlab7172412016-07-27 05:26:18 -04001705 if (adapter->update_uc_list) {
Sathya Perlab7172412016-07-27 05:26:18 -04001706 /* cache the uc-list in adapter array */
Ivan Vecera6052cd12017-01-06 21:59:30 +01001707 i = 0;
Sathya Perlab7172412016-07-27 05:26:18 -04001708 netdev_for_each_uc_addr(ha, netdev) {
1709 ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1710 i++;
1711 }
1712 curr_uc_macs = netdev_uc_count(netdev);
1713 }
1714 netif_addr_unlock_bh(netdev);
1715
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001716 if (uc_promisc) {
1717 be_set_uc_promisc(adapter);
1718 } else if (adapter->update_uc_list) {
1719 be_clear_uc_promisc(adapter);
1720
Sathya Perlab7172412016-07-27 05:26:18 -04001721 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301722 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001723
Sathya Perlab7172412016-07-27 05:26:18 -04001724 for (i = 0; i < curr_uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301725 be_uc_mac_add(adapter, i);
Sathya Perlab7172412016-07-27 05:26:18 -04001726 adapter->uc_macs = curr_uc_macs;
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001727 adapter->update_uc_list = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001728 }
1729}
1730
1731static void be_clear_uc_list(struct be_adapter *adapter)
1732{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001733 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001734 int i;
1735
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001736 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001737 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301738 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1739
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001740 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301741}
1742
Sathya Perlab7172412016-07-27 05:26:18 -04001743static void __be_set_rx_mode(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001744{
Sathya Perlab7172412016-07-27 05:26:18 -04001745 struct net_device *netdev = adapter->netdev;
1746
1747 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001748
1749 if (netdev->flags & IFF_PROMISC) {
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001750 if (!be_in_all_promisc(adapter))
1751 be_set_all_promisc(adapter);
1752 } else if (be_in_all_promisc(adapter)) {
1753 /* We need to re-program the vlan-list or clear
1754 * vlan-promisc mode (if needed) when the interface
1755 * comes out of promisc mode.
1756 */
1757 be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001758 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001759
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001760 be_set_uc_list(adapter);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001761 be_set_mc_list(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04001762
1763 mutex_unlock(&adapter->rx_filter_lock);
1764}
1765
1766static void be_work_set_rx_mode(struct work_struct *work)
1767{
1768 struct be_cmd_work *cmd_work =
1769 container_of(work, struct be_cmd_work, work);
1770
1771 __be_set_rx_mode(cmd_work->adapter);
1772 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001773}
1774
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001775static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1776{
1777 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001778 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001779 int status;
1780
Sathya Perla11ac75e2011-12-13 00:58:50 +00001781 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001782 return -EPERM;
1783
Sathya Perla11ac75e2011-12-13 00:58:50 +00001784 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001785 return -EINVAL;
1786
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301787	/* Proceed further only if the user-provided MAC is different
1788 * from active MAC
1789 */
1790 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1791 return 0;
1792
Sathya Perla3175d8c2013-07-23 15:25:03 +05301793 if (BEx_chip(adapter)) {
1794 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1795 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001796
Sathya Perla11ac75e2011-12-13 00:58:50 +00001797 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1798 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301799 } else {
1800 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1801 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001802 }
1803
Kalesh APabccf232014-07-17 16:20:24 +05301804 if (status) {
1805 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1806 mac, vf, status);
1807 return be_cmd_status(status);
1808 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001809
Kalesh APabccf232014-07-17 16:20:24 +05301810 ether_addr_copy(vf_cfg->mac_addr, mac);
1811
1812 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001813}
1814
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001815static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301816 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001817{
1818 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001819 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001820
Sathya Perla11ac75e2011-12-13 00:58:50 +00001821 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001822 return -EPERM;
1823
Sathya Perla11ac75e2011-12-13 00:58:50 +00001824 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001825 return -EINVAL;
1826
1827 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001828 vi->max_tx_rate = vf_cfg->tx_rate;
1829 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001830 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1831 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001832 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301833 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001834 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001835
1836 return 0;
1837}
1838
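/* Transparent VLAN Tagging (TVT): the PF forces a vlan tag on all of a
 * VF's traffic. While TVT is on, the VF's pre-programmed vlan filters
 * are cleared and its FILTMGMT privilege is revoked, so the VM cannot
 * program vlan filters of its own.
 */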
Vasundhara Volam435452a2015-03-20 06:28:23 -04001839static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1840{
1841 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1842 u16 vids[BE_NUM_VLANS_SUPPORTED];
1843 int vf_if_id = vf_cfg->if_handle;
1844 int status;
1845
1846 /* Enable Transparent VLAN Tagging */
Kalesh APe7bcbd72015-05-06 05:30:32 -04001847 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001848 if (status)
1849 return status;
1850
1851	/* With TVT enabled, clear any pre-programmed VLAN filters on the VF */
1852 vids[0] = 0;
1853 status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1854 if (!status)
1855 dev_info(&adapter->pdev->dev,
1856 "Cleared guest VLANs on VF%d", vf);
1857
1858	/* After TVT is enabled, disallow the VF from programming VLAN filters */
1859 if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1860 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1861 ~BE_PRIV_FILTMGMT, vf + 1);
1862 if (!status)
1863 vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1864 }
1865 return 0;
1866}
1867
1868static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1869{
1870 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1871 struct device *dev = &adapter->pdev->dev;
1872 int status;
1873
1874 /* Reset Transparent VLAN Tagging. */
1875 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
Kalesh APe7bcbd72015-05-06 05:30:32 -04001876 vf_cfg->if_handle, 0, 0);
Vasundhara Volam435452a2015-03-20 06:28:23 -04001877 if (status)
1878 return status;
1879
1880 /* Allow VFs to program VLAN filtering */
1881 if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1882 status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1883 BE_PRIV_FILTMGMT, vf + 1);
1884 if (!status) {
1885 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1886 dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1887 }
1888 }
1889
1890 dev_info(dev,
1891 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1892 return 0;
1893}
1894
Moshe Shemesh79aab092016-09-22 12:11:15 +03001895static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1896 __be16 vlan_proto)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001897{
1898 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001899 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001900 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001901
Sathya Perla11ac75e2011-12-13 00:58:50 +00001902 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001903 return -EPERM;
1904
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001905 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001906 return -EINVAL;
1907
Moshe Shemesh79aab092016-09-22 12:11:15 +03001908 if (vlan_proto != htons(ETH_P_8021Q))
1909 return -EPROTONOSUPPORT;
1910
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001911 if (vlan || qos) {
1912 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001913 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001914 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001915 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001916 }
1917
Kalesh APabccf232014-07-17 16:20:24 +05301918 if (status) {
1919 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001920 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1921 status);
Kalesh APabccf232014-07-17 16:20:24 +05301922 return be_cmd_status(status);
1923 }
1924
1925 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301926 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001927}
1928
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001929static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1930 int min_tx_rate, int max_tx_rate)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001931{
1932 struct be_adapter *adapter = netdev_priv(netdev);
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301933 struct device *dev = &adapter->pdev->dev;
1934 int percent_rate, status = 0;
1935 u16 link_speed = 0;
1936 u8 link_status;
Ajit Khapardee1d18732010-07-23 01:52:13 +00001937
Sathya Perla11ac75e2011-12-13 00:58:50 +00001938 if (!sriov_enabled(adapter))
Ajit Khapardee1d18732010-07-23 01:52:13 +00001939 return -EPERM;
1940
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001941 if (vf >= adapter->num_vfs)
Ajit Khapardee1d18732010-07-23 01:52:13 +00001942 return -EINVAL;
1943
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001944 if (min_tx_rate)
1945 return -EINVAL;
1946
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301947 if (!max_tx_rate)
1948 goto config_qos;
1949
1950 status = be_cmd_link_status_query(adapter, &link_speed,
1951 &link_status, 0);
1952 if (status)
1953 goto err;
1954
1955 if (!link_status) {
1956 dev_err(dev, "TX-rate setting not allowed when link is down\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05301957 status = -ENETDOWN;
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301958 goto err;
Ajit Khaparde94f434c2011-12-30 12:15:30 +00001959 }
Ajit Khapardee1d18732010-07-23 01:52:13 +00001960
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301961 if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1962 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1963 link_speed);
1964 status = -EINVAL;
1965 goto err;
1966 }
1967
1968 /* On Skyhawk the QOS setting must be done only as a % value */
1969 percent_rate = link_speed / 100;
1970 if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1971 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1972 percent_rate);
1973 status = -EINVAL;
1974 goto err;
1975 }
1976
1977config_qos:
1978 status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001979 if (status)
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05301980 goto err;
1981
1982 adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1983 return 0;
1984
1985err:
1986 dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1987 max_tx_rate, vf);
Kalesh APabccf232014-07-17 16:20:24 +05301988 return be_cmd_status(status);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001989}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301990
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301991static int be_set_vf_link_state(struct net_device *netdev, int vf,
1992 int link_state)
1993{
1994 struct be_adapter *adapter = netdev_priv(netdev);
1995 int status;
1996
1997 if (!sriov_enabled(adapter))
1998 return -EPERM;
1999
2000 if (vf >= adapter->num_vfs)
2001 return -EINVAL;
2002
2003 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302004 if (status) {
2005 dev_err(&adapter->pdev->dev,
2006 "Link state change on VF %d failed: %#x\n", vf, status);
2007 return be_cmd_status(status);
2008 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302009
Kalesh APabccf232014-07-17 16:20:24 +05302010 adapter->vf_cfg[vf].plink_tracking = link_state;
2011
2012 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302013}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002014
Kalesh APe7bcbd72015-05-06 05:30:32 -04002015static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2016{
2017 struct be_adapter *adapter = netdev_priv(netdev);
2018 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2019 u8 spoofchk;
2020 int status;
2021
2022 if (!sriov_enabled(adapter))
2023 return -EPERM;
2024
2025 if (vf >= adapter->num_vfs)
2026 return -EINVAL;
2027
2028 if (BEx_chip(adapter))
2029 return -EOPNOTSUPP;
2030
2031 if (enable == vf_cfg->spoofchk)
2032 return 0;
2033
2034 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2035
2036 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2037 0, spoofchk);
2038 if (status) {
2039 dev_err(&adapter->pdev->dev,
2040 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2041 return be_cmd_status(status);
2042 }
2043
2044 vf_cfg->spoofchk = enable;
2045 return 0;
2046}
2047
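/* Records the RX/TX pkt counters and timestamp that serve as the
 * baseline for the next adaptive interrupt-coalescing (AIC)
 * calculation.
 */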
Sathya Perla2632baf2013-10-01 16:00:00 +05302048static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2049 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002050{
Sathya Perla2632baf2013-10-01 16:00:00 +05302051 aic->rx_pkts_prev = rx_pkts;
2052 aic->tx_reqs_prev = tx_pkts;
2053 aic->jiffies = now;
2054}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002055
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002056static int be_get_new_eqd(struct be_eq_obj *eqo)
Sathya Perla2632baf2013-10-01 16:00:00 +05302057{
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002058 struct be_adapter *adapter = eqo->adapter;
2059 int eqd, start;
Sathya Perla2632baf2013-10-01 16:00:00 +05302060 struct be_aic_obj *aic;
Sathya Perla2632baf2013-10-01 16:00:00 +05302061 struct be_rx_obj *rxo;
2062 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002063 u64 rx_pkts = 0, tx_pkts = 0;
Sathya Perla2632baf2013-10-01 16:00:00 +05302064 ulong now;
2065 u32 pps, delta;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002066 int i;
2067
2068 aic = &adapter->aic_obj[eqo->idx];
2069 if (!aic->enable) {
2070 if (aic->jiffies)
2071 aic->jiffies = 0;
2072 eqd = aic->et_eqd;
2073 return eqd;
2074 }
2075
2076 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2077 do {
2078 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2079 rx_pkts += rxo->stats.rx_pkts;
2080 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
2081 }
2082
2083 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2084 do {
2085 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2086 tx_pkts += txo->stats.tx_reqs;
2087 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
2088 }
2089
2090	/* Skip if the counters wrapped around, or on the first calculation */
2091 now = jiffies;
2092 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2093 rx_pkts < aic->rx_pkts_prev ||
2094 tx_pkts < aic->tx_reqs_prev) {
2095 be_aic_update(aic, rx_pkts, tx_pkts, now);
2096 return aic->prev_eqd;
2097 }
2098
2099 delta = jiffies_to_msecs(now - aic->jiffies);
2100 if (delta == 0)
2101 return aic->prev_eqd;
2102
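	/* Derive the new delay from the combined RX+TX pkt rate:
	 * roughly 4 units of delay per 15K pkts/sec (e.g. 150K
	 * pkts/sec yields eqd = 40), clamped to the [min_eqd,
	 * max_eqd] range.
	 */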
2103 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2104 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2105 eqd = (pps / 15000) << 2;
2106
2107 if (eqd < 8)
2108 eqd = 0;
2109 eqd = min_t(u32, eqd, aic->max_eqd);
2110 eqd = max_t(u32, eqd, aic->min_eqd);
2111
2112 be_aic_update(aic, rx_pkts, tx_pkts, now);
2113
2114 return eqd;
2115}
2116
2117/* For Skyhawk-R only */
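/* Maps the current adaptive delay to one of four coarse R2I
 * delay-multiplier encodings: eqd > 100 -> ENC_1, 61-100 -> ENC_2,
 * 21-60 -> ENC_3, else ENC_0.
 */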
2118static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2119{
2120 struct be_adapter *adapter = eqo->adapter;
2121 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2122 ulong now = jiffies;
2123 int eqd;
2124 u32 mult_enc;
2125
2126 if (!aic->enable)
2127 return 0;
2128
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302129 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002130 eqd = aic->prev_eqd;
2131 else
2132 eqd = be_get_new_eqd(eqo);
2133
2134 if (eqd > 100)
2135 mult_enc = R2I_DLY_ENC_1;
2136 else if (eqd > 60)
2137 mult_enc = R2I_DLY_ENC_2;
2138 else if (eqd > 20)
2139 mult_enc = R2I_DLY_ENC_3;
2140 else
2141 mult_enc = R2I_DLY_ENC_0;
2142
2143 aic->prev_eqd = eqd;
2144
2145 return mult_enc;
2146}
2147
2148void be_eqd_update(struct be_adapter *adapter, bool force_update)
2149{
2150 struct be_set_eqd set_eqd[MAX_EVT_QS];
2151 struct be_aic_obj *aic;
2152 struct be_eq_obj *eqo;
2153 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002154
Sathya Perla2632baf2013-10-01 16:00:00 +05302155 for_all_evt_queues(adapter, eqo, i) {
2156 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002157 eqd = be_get_new_eqd(eqo);
2158 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302159 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2160 set_eqd[num].eq_id = eqo->q.id;
2161 aic->prev_eqd = eqd;
2162 num++;
2163 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002164 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302165
2166 if (num)
2167 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002168}
2169
Sathya Perla3abcded2010-10-03 22:12:27 -07002170static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302171 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002172{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002173 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002174
Sathya Perlaab1594e2011-07-25 19:10:15 +00002175 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002176 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002177 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302179 if (rxcp->tunneled)
2180 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002181 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002182 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002183 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002184 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002185 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186}
2187
Sathya Perla2e588f82011-03-11 02:49:26 +00002188static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002189{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002190 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302191 * Also ignore ipcksm for ipv6 pkts
2192 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002193 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302194 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002195}
2196
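/* Returns the page_info for the RX frag at the queue tail, after
 * unmapping (for the last frag of a page) or CPU-syncing its DMA
 * buffer, and consumes that RXQ entry.
 */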
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302197static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002199 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002200 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07002201 struct be_queue_info *rxq = &rxo->q;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302202 u32 frag_idx = rxq->tail;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002203
Sathya Perla3abcded2010-10-03 22:12:27 -07002204 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002205 BUG_ON(!rx_page_info->page);
2206
Sathya Perlae50287b2014-03-04 12:14:38 +05302207 if (rx_page_info->last_frag) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002208 dma_unmap_page(&adapter->pdev->dev,
2209 dma_unmap_addr(rx_page_info, bus),
2210 adapter->big_page_size, DMA_FROM_DEVICE);
Sathya Perlae50287b2014-03-04 12:14:38 +05302211 rx_page_info->last_frag = false;
2212 } else {
2213 dma_sync_single_for_cpu(&adapter->pdev->dev,
2214 dma_unmap_addr(rx_page_info, bus),
2215 rx_frag_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00002216 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002217
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302218 queue_tail_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002219 atomic_dec(&rxq->used);
2220 return rx_page_info;
2221}
2222
2223/* Throw away the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002224static void be_rx_compl_discard(struct be_rx_obj *rxo,
2225 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002228 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002229
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002230 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302231 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002232 put_page(page_info->page);
2233 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234 }
2235}
2236
2237/*
2238 * skb_fill_rx_data forms a complete skb for an ether frame
2239 * indicated by rxcp.
2240 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002241static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2242 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002244 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002245 u16 i, j;
2246 u16 hdr_len, curr_frag_len, remaining;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247 u8 *start;
2248
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302249 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002250 start = page_address(page_info->page) + page_info->page_offset;
2251 prefetch(start);
2252
2253 /* Copy data in the first descriptor of this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002254 curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002255
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256 skb->len = curr_frag_len;
2257 if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002258 memcpy(skb->data, start, curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002259 /* Complete packet has now been moved to data */
2260 put_page(page_info->page);
2261 skb->data_len = 0;
2262 skb->tail += curr_frag_len;
2263 } else {
Eric Dumazetac1ae5f2012-07-13 03:19:41 +00002264 hdr_len = ETH_HLEN;
2265 memcpy(skb->data, start, hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002266 skb_shinfo(skb)->nr_frags = 1;
Ian Campbellb061b392011-08-29 23:18:23 +00002267 skb_frag_set_page(skb, 0, page_info->page);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002268 skb_shinfo(skb)->frags[0].page_offset =
2269 page_info->page_offset + hdr_len;
Sathya Perla748b5392014-05-09 13:29:13 +05302270 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2271 curr_frag_len - hdr_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002272 skb->data_len = curr_frag_len - hdr_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002273 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002274 skb->tail += hdr_len;
2275 }
Ajit Khaparde205859a2010-02-09 01:34:21 +00002276 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002277
Sathya Perla2e588f82011-03-11 02:49:26 +00002278 if (rxcp->pkt_size <= rx_frag_size) {
2279 BUG_ON(rxcp->num_rcvd != 1);
2280 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281 }
2282
2283 /* More frags present for this completion */
Sathya Perla2e588f82011-03-11 02:49:26 +00002284 remaining = rxcp->pkt_size - curr_frag_len;
2285 for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302286 page_info = get_rx_page_info(rxo);
Sathya Perla2e588f82011-03-11 02:49:26 +00002287 curr_frag_len = min(remaining, rx_frag_size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002289 /* Coalesce all frags from the same physical page in one slot */
2290 if (page_info->page_offset == 0) {
2291 /* Fresh page */
2292 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002293 skb_frag_set_page(skb, j, page_info->page);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002294 skb_shinfo(skb)->frags[j].page_offset =
2295 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002296 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002297 skb_shinfo(skb)->nr_frags++;
2298 } else {
2299 put_page(page_info->page);
2300 }
2301
Eric Dumazet9e903e02011-10-18 21:00:24 +00002302 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002303 skb->len += curr_frag_len;
2304 skb->data_len += curr_frag_len;
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002305 skb->truesize += rx_frag_size;
Sathya Perla2e588f82011-03-11 02:49:26 +00002306 remaining -= curr_frag_len;
Ajit Khaparde205859a2010-02-09 01:34:21 +00002307 page_info->page = NULL;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002308 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002309 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002310}
2311
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002312/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6384a4d2013-10-25 10:40:16 +05302313static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002314 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002315{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002316 struct be_adapter *adapter = rxo->adapter;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002317 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002318 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00002319
Eric Dumazetbb349bb2012-01-25 03:56:30 +00002320 skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
Sathya Perlaa058a632010-02-17 01:34:22 +00002321 if (unlikely(!skb)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002322 rx_stats(rxo)->rx_drops_no_skbs++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002323 be_rx_compl_discard(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002324 return;
2325 }
2326
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002327 skb_fill_rx_data(rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002328
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002329 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07002330 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00002331 else
2332 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002333
Michał Mirosław6332c8d2011-04-07 02:43:48 +00002334 skb->protocol = eth_type_trans(skb, netdev);
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002335 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002336 if (netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08002337 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302338
Tom Herbertb6c0e892014-08-27 21:27:17 -07002339 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302340 skb_mark_napi_id(skb, napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002341
Jiri Pirko343e43c2011-08-25 02:50:51 +00002342 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002343 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002344
2345 netif_receive_skb(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002346}
2347
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002348/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09002349static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2350 struct napi_struct *napi,
2351 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002352{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002353 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002354 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002355 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00002356 u16 remaining, curr_frag_len;
2357 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00002358
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002359 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002360 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002361 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002362 return;
2363 }
2364
Sathya Perla2e588f82011-03-11 02:49:26 +00002365 remaining = rxcp->pkt_size;
2366 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302367 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002368
2369 curr_frag_len = min(remaining, rx_frag_size);
2370
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002371 /* Coalesce all frags from the same physical page in one slot */
2372 if (i == 0 || page_info->page_offset == 0) {
2373 /* First frag or Fresh page */
2374 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00002375 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002376 skb_shinfo(skb)->frags[j].page_offset =
2377 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00002378 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002379 } else {
2380 put_page(page_info->page);
2381 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00002382 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00002383 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002384 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002385 memset(page_info, 0, sizeof(*page_info));
2386 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00002387 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002388
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002389 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00002390 skb->len = rxcp->pkt_size;
2391 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002392 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00002393 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00002394 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08002395 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05302396
Tom Herbertb6c0e892014-08-27 21:27:17 -07002397 skb->csum_level = rxcp->tunneled;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07002398
Jiri Pirko343e43c2011-08-25 02:50:51 +00002399 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00002400 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07002401
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002402 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002403}
2404
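/* Extract the fields of a v1 (BE3-native) RX completion into rxcp */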
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002405static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2406 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002407{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302408 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2409 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2410 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2411 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2412 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2413 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2414 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2415 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2416 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2417 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2418 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002419 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302420 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2421 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002422 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302423 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302424 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302425 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002426}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002427
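/* Extract the fields of a v0 (legacy) RX completion into rxcp */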
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002428static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2429 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002430{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302431 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2432 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2433 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2434 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2435 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2436 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2437 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2438 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2439 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2440 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2441 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002442 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302443 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2444 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002445 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302446 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2447 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002448}
2449
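/* Return the next valid RX completion on this ring's CQ, parsed into
 * rxo->rxcp; returns NULL when no completion is pending.
 */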
2450static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2451{
2452 struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2453 struct be_rx_compl_info *rxcp = &rxo->rxcp;
2454 struct be_adapter *adapter = rxo->adapter;
2455
2456	/* For checking the valid bit it is OK to use either definition, as the
2457	 * valid bit is at the same position in both v0 and v1 Rx compls */
2458 if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002459 return NULL;
2460
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002461 rmb();
Sathya Perla2e588f82011-03-11 02:49:26 +00002462 be_dws_le_to_cpu(compl, sizeof(*compl));
2463
2464 if (adapter->be3_native)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002465 be_parse_rx_compl_v1(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002466 else
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002467 be_parse_rx_compl_v0(compl, rxcp);
Sathya Perla2e588f82011-03-11 02:49:26 +00002468
Somnath Koture38b1702013-05-29 22:55:56 +00002469 if (rxcp->ip_frag)
2470 rxcp->l4_csum = 0;
2471
Sathya Perla15d72182011-03-21 20:49:26 +00002472 if (rxcp->vlanf) {
Vasundhara Volamf93f1602014-02-12 16:09:25 +05302473		/* In QNQ modes, if the qnq bit is not set, then the packet was
2474		 * tagged only with the transparent outer vlan-tag and must
2475		 * not be treated as a vlan packet by the host
2476 */
2477 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
Sathya Perla15d72182011-03-21 20:49:26 +00002478 rxcp->vlanf = 0;
Sathya Perla2e588f82011-03-11 02:49:26 +00002479
Sathya Perla15d72182011-03-21 20:49:26 +00002480 if (!lancer_chip(adapter))
David S. Miller3c709f82011-05-11 14:26:15 -04002481 rxcp->vlan_tag = swab16(rxcp->vlan_tag);
Sathya Perla2e588f82011-03-11 02:49:26 +00002482
Somnath Kotur939cf302011-08-18 21:51:49 -07002483 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05302484 !test_bit(rxcp->vlan_tag, adapter->vids))
Sathya Perla15d72182011-03-21 20:49:26 +00002485 rxcp->vlanf = 0;
2486 }
Sathya Perla2e588f82011-03-11 02:49:26 +00002487
2488	/* As the compl has been parsed, reset it; we won't touch it again */
2489 compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002490
Sathya Perla3abcded2010-10-03 22:12:27 -07002491 queue_tail_inc(&rxo->cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002492 return rxcp;
2493}
2494
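/* Allocate pages for RX frags; a compound page is used when the
 * requested size exceeds PAGE_SIZE.
 */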
Eric Dumazet1829b082011-03-01 05:48:12 +00002495static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002497 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002498
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002500 gfp |= __GFP_COMP;
2501 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002502}
2503
2504/*
2505 * Allocate a page, split it into fragments of size rx_frag_size and post as
2506 * receive buffers to BE
2507 */
Ajit Khapardec30d7262014-09-12 17:39:16 +05302508static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002509{
Sathya Perla3abcded2010-10-03 22:12:27 -07002510 struct be_adapter *adapter = rxo->adapter;
Sathya Perla26d92f92010-01-21 22:52:08 -08002511 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07002512 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002513 struct page *pagep = NULL;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002514 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002515 struct be_eth_rx_d *rxd;
2516 u64 page_dmaaddr = 0, frag_dmaaddr;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302517 u32 posted, page_offset = 0, notify = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002518
Sathya Perla3abcded2010-10-03 22:12:27 -07002519 page_info = &rxo->page_info_tbl[rxq->head];
Ajit Khapardec30d7262014-09-12 17:39:16 +05302520 for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002521 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00002522 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002523 if (unlikely(!pagep)) {
Sathya Perlaac124ff2011-07-25 19:10:14 +00002524 rx_stats(rxo)->rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002525 break;
2526 }
Ivan Veceraba42fad2014-01-15 11:11:34 +01002527 page_dmaaddr = dma_map_page(dev, pagep, 0,
2528 adapter->big_page_size,
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002529 DMA_FROM_DEVICE);
Ivan Veceraba42fad2014-01-15 11:11:34 +01002530 if (dma_mapping_error(dev, page_dmaaddr)) {
2531 put_page(pagep);
2532 pagep = NULL;
Vasundhara Volamd3de1542014-09-02 09:56:50 +05302533 adapter->drv_stats.dma_map_errors++;
Ivan Veceraba42fad2014-01-15 11:11:34 +01002534 break;
2535 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302536 page_offset = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002537 } else {
2538 get_page(pagep);
Sathya Perlae50287b2014-03-04 12:14:38 +05302539 page_offset += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002540 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302541 page_info->page_offset = page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002542 page_info->page = pagep;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002543
2544 rxd = queue_head_node(rxq);
Sathya Perlae50287b2014-03-04 12:14:38 +05302545 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002546 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2547 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002548
2549 /* Any space left in the current big page for another frag? */
2550 if ((page_offset + rx_frag_size + rx_frag_size) >
2551 adapter->big_page_size) {
2552 pagep = NULL;
Sathya Perlae50287b2014-03-04 12:14:38 +05302553 page_info->last_frag = true;
2554 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2555 } else {
2556 dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002557 }
Sathya Perla26d92f92010-01-21 22:52:08 -08002558
2559 prev_page_info = page_info;
2560 queue_head_inc(rxq);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002561 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002562 }
Sathya Perlae50287b2014-03-04 12:14:38 +05302563
2564 /* Mark the last frag of a page when we break out of the above loop
2565 * with no more slots available in the RXQ
2566 */
2567 if (pagep) {
2568 prev_page_info->last_frag = true;
2569 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2570 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002571
2572 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002573 atomic_add(posted, &rxq->used);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302574 if (rxo->rx_post_starved)
2575 rxo->rx_post_starved = false;
Ajit Khapardec30d7262014-09-12 17:39:16 +05302576 do {
Ajit Khaparde69304cc2015-04-08 16:59:48 -05002577 notify = min(MAX_NUM_POST_ERX_DB, posted);
Ajit Khapardec30d7262014-09-12 17:39:16 +05302578 be_rxq_notify(adapter, rxq->id, notify);
2579 posted -= notify;
2580 } while (posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07002581 } else if (atomic_read(&rxq->used) == 0) {
2582 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07002583 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002584 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002585}
2586
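/* Return the next valid TX completion on this ring's CQ with its status
 * and wrb_index parsed into txo->txcp; returns NULL when none is pending.
 */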
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302587static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002588{
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302589 struct be_queue_info *tx_cq = &txo->cq;
2590 struct be_tx_compl_info *txcp = &txo->txcp;
2591 struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002592
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302593 if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002594 return NULL;
2595
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302596 /* Ensure load ordering of valid bit dword and other dwords below */
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00002597 rmb();
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302598 be_dws_le_to_cpu(compl, sizeof(*compl));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002599
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302600 txcp->status = GET_TX_COMPL_BITS(status, compl);
2601 txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002602
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302603 compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002604 queue_tail_inc(tx_cq);
2605 return txcp;
2606}
2607
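/* Unmap and free the skb(s) whose wrbs have completed, up to last_index;
 * returns the number of wrbs freed so the caller can credit the TXQ.
 */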
Sathya Perla3c8def92011-06-12 20:01:58 +00002608static u16 be_tx_compl_process(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302609 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002610{
Sathya Perla3c8def92011-06-12 20:01:58 +00002611 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002612 struct be_queue_info *txq = &txo->q;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002613 struct sk_buff *skb = NULL;
2614 bool unmap_skb_hdr = false;
2615 struct be_eth_wrb *wrb;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302616 u16 num_wrbs = 0;
2617 u32 frag_index;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002618
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002619 do {
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002620 if (sent_skbs[txq->tail]) {
2621 /* Free skb from prev req */
2622 if (skb)
2623 dev_consume_skb_any(skb);
2624 skb = sent_skbs[txq->tail];
2625 sent_skbs[txq->tail] = NULL;
2626 queue_tail_inc(txq); /* skip hdr wrb */
2627 num_wrbs++;
2628 unmap_skb_hdr = true;
2629 }
Alexander Duycka73b7962009-12-02 16:48:18 +00002630 wrb = queue_tail_node(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002631 frag_index = txq->tail;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002632 unmap_tx_frag(&adapter->pdev->dev, wrb,
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002633 (unmap_skb_hdr && skb_headlen(skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00002634 unmap_skb_hdr = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002635 queue_tail_inc(txq);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002636 num_wrbs++;
2637 } while (frag_index != last_index);
2638 dev_consume_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002639
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00002640 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002641}
2642
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002643/* Return the number of events in the event queue */
2644static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002645{
2646 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002647 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002648
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002649 do {
2650 eqe = queue_tail_node(&eqo->q);
2651 if (eqe->evt == 0)
2652 break;
2653
2654 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002655 eqe->evt = 0;
2656 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002657 queue_tail_inc(&eqo->q);
2658 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002659
2660 return num;
2661}
2662
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002663/* Leaves the EQ in a disarmed state */
2664static void be_eq_clean(struct be_eq_obj *eqo)
2665{
2666 int num = events_get(eqo);
2667
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002668 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002669}
2670
Kalesh AP99b44302015-08-05 03:27:49 -04002671/* Free posted rx buffers that were not used */
2672static void be_rxq_clean(struct be_rx_obj *rxo)
2673{
2674 struct be_queue_info *rxq = &rxo->q;
2675 struct be_rx_page_info *page_info;
2676
2677 while (atomic_read(&rxq->used) > 0) {
2678 page_info = get_rx_page_info(rxo);
2679 put_page(page_info->page);
2680 memset(page_info, 0, sizeof(*page_info));
2681 }
2682 BUG_ON(atomic_read(&rxq->used));
2683 rxq->tail = 0;
2684 rxq->head = 0;
2685}
2686
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002687static void be_rx_cq_clean(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002688{
Sathya Perla3abcded2010-10-03 22:12:27 -07002689 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00002690 struct be_rx_compl_info *rxcp;
Sathya Perlad23e9462012-12-17 19:38:51 +00002691 struct be_adapter *adapter = rxo->adapter;
2692 int flush_wait = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002693
Sathya Perlad23e9462012-12-17 19:38:51 +00002694 /* Consume pending rx completions.
2695 * Wait for the flush completion (identified by zero num_rcvd)
2696 * to arrive. Notify CQ even when there are no more CQ entries
2697 * for HW to flush partially coalesced CQ entries.
2698 * In Lancer, there is no need to wait for flush compl.
2699 */
2700 for (;;) {
2701 rxcp = be_rx_compl_get(rxo);
Kalesh APddf11692014-07-17 16:20:28 +05302702 if (!rxcp) {
Sathya Perlad23e9462012-12-17 19:38:51 +00002703 if (lancer_chip(adapter))
2704 break;
2705
Venkata Duvvuru954f6822015-05-13 13:00:13 +05302706 if (flush_wait++ > 50 ||
2707 be_check_error(adapter,
2708 BE_ERROR_HW)) {
Sathya Perlad23e9462012-12-17 19:38:51 +00002709 dev_warn(&adapter->pdev->dev,
2710 "did not receive flush compl\n");
2711 break;
2712 }
2713 be_cq_notify(adapter, rx_cq->id, true, 0);
2714 mdelay(1);
2715 } else {
2716 be_rx_compl_discard(rxo, rxcp);
Sathya Perla3f5dffe2013-05-08 02:05:49 +00002717 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perlad23e9462012-12-17 19:38:51 +00002718 if (rxcp->num_rcvd == 0)
2719 break;
2720 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002721 }
2722
Sathya Perlad23e9462012-12-17 19:38:51 +00002723 /* After cleanup, leave the CQ in unarmed state */
2724 be_cq_notify(adapter, rx_cq->id, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002725}
2726
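/* Drain TX completions on all rings at teardown and free any requests
 * that were enqueued but never notified to the HW.
 */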
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002727static void be_tx_compl_clean(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002728{
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002729 struct device *dev = &adapter->pdev->dev;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302730 u16 cmpl = 0, timeo = 0, num_wrbs = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302731 struct be_tx_compl_info *txcp;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002732 struct be_queue_info *txq;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +05302733 u32 end_idx, notified_idx;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302734 struct be_tx_obj *txo;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002735 int i, pending_txqs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002736
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302737 /* Stop polling for compls when HW has been silent for 10ms */
Sathya Perlaa8e91792009-08-10 03:42:43 +00002738 do {
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002739 pending_txqs = adapter->num_tx_qs;
2740
2741 for_all_tx_queues(adapter, txo, i) {
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302742 cmpl = 0;
2743 num_wrbs = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002744 txq = &txo->q;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302745 while ((txcp = be_tx_compl_get(txo))) {
2746 num_wrbs +=
2747 be_tx_compl_process(adapter, txo,
2748 txcp->end_index);
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002749 cmpl++;
2750 }
2751 if (cmpl) {
2752 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2753 atomic_sub(num_wrbs, &txq->used);
Vasundhara Volam1a3d0712014-04-14 16:12:40 +05302754 timeo = 0;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002755 }
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05302756 if (!be_is_tx_compl_pending(txo))
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002757 pending_txqs--;
Sathya Perlaa8e91792009-08-10 03:42:43 +00002758 }
2759
Venkata Duvvuru954f6822015-05-13 13:00:13 +05302760 if (pending_txqs == 0 || ++timeo > 10 ||
2761 be_check_error(adapter, BE_ERROR_HW))
Sathya Perlaa8e91792009-08-10 03:42:43 +00002762 break;
2763
2764 mdelay(1);
2765 } while (true);
2766
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002767 /* Free enqueued TX that was never notified to HW */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002768 for_all_tx_queues(adapter, txo, i) {
2769 txq = &txo->q;
Sathya Perlab03388d2010-02-18 00:37:17 +00002770
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002771 if (atomic_read(&txq->used)) {
2772 dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2773 i, atomic_read(&txq->used));
2774 notified_idx = txq->tail;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002775 end_idx = txq->tail;
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002776 index_adv(&end_idx, atomic_read(&txq->used) - 1,
2777 txq->len);
2778 /* Use the tx-compl process logic to handle requests
2779 * that were not sent to the HW.
2780 */
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002781 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2782 atomic_sub(num_wrbs, &txq->used);
Sathya Perla5f07b3c2015-01-05 05:48:34 -05002783 BUG_ON(atomic_read(&txq->used));
2784 txo->pend_wrb_cnt = 0;
2785 /* Since hw was never notified of these requests,
2786 * reset TXQ indices
2787 */
2788 txq->head = notified_idx;
2789 txq->tail = notified_idx;
Sathya Perla0ae57bb2012-02-23 18:50:14 +00002790 }
Sathya Perlab03388d2010-02-18 00:37:17 +00002791 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002792}
2793
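/* Tear down all EQs along with their NAPI contexts and affinity masks */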
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002794static void be_evt_queues_destroy(struct be_adapter *adapter)
2795{
2796 struct be_eq_obj *eqo;
2797 int i;
2798
2799 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002800 if (eqo->q.created) {
2801 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002802 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302803 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002804 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002805 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002806 be_queue_free(adapter, &eqo->q);
2807 }
2808}
2809
2810static int be_evt_queues_create(struct be_adapter *adapter)
2811{
2812 struct be_queue_info *eq;
2813 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302814 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002815 int i, rc;
2816
Sathya Perlae2617682016-06-22 08:54:54 -04002817 /* need enough EQs to service both RX and TX queues */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302818 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
Sathya Perlae2617682016-06-22 08:54:54 -04002819 max(adapter->cfg_num_rx_irqs,
2820 adapter->cfg_num_tx_irqs));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002821
2822 for_all_evt_queues(adapter, eqo, i) {
Rusty Russellf36963c2015-05-09 03:14:13 +09302823 int numa_node = dev_to_node(&adapter->pdev->dev);
Kalesh AP649886a2015-08-05 03:27:50 -04002824
Sathya Perla2632baf2013-10-01 16:00:00 +05302825 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002826 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002827 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302828 aic->max_eqd = BE_MAX_EQD;
2829 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002830
2831 eq = &eqo->q;
2832 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302833 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002834 if (rc)
2835 return rc;
2836
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302837 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002838 if (rc)
2839 return rc;
Kalesh AP649886a2015-08-05 03:27:50 -04002840
2841 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2842 return -ENOMEM;
2843 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2844 eqo->affinity_mask);
2845 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2846 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002847 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002848 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002849}
2850
Sathya Perla5fb379e2009-06-18 00:02:59 +00002851static void be_mcc_queues_destroy(struct be_adapter *adapter)
2852{
2853 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002854
Sathya Perla8788fdc2009-07-27 22:52:03 +00002855 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002856 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002857 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002858 be_queue_free(adapter, q);
2859
Sathya Perla8788fdc2009-07-27 22:52:03 +00002860 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002861 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002862 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002863 be_queue_free(adapter, q);
2864}
2865
2866/* Must be called only after TX qs are created as MCC shares TX EQ */
2867static int be_mcc_queues_create(struct be_adapter *adapter)
2868{
2869 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002870
Sathya Perla8788fdc2009-07-27 22:52:03 +00002871 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002872 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302873 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002874 goto err;
2875
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002876 /* Use the default EQ for MCC completions */
2877 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002878 goto mcc_cq_free;
2879
Sathya Perla8788fdc2009-07-27 22:52:03 +00002880 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002881 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2882 goto mcc_cq_destroy;
2883
Sathya Perla8788fdc2009-07-27 22:52:03 +00002884 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002885 goto mcc_q_free;
2886
2887 return 0;
2888
2889mcc_q_free:
2890 be_queue_free(adapter, q);
2891mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002892 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002893mcc_cq_free:
2894 be_queue_free(adapter, cq);
2895err:
2896 return -1;
2897}
2898
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002899static void be_tx_queues_destroy(struct be_adapter *adapter)
2900{
2901 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002902 struct be_tx_obj *txo;
2903 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002904
Sathya Perla3c8def92011-06-12 20:01:58 +00002905 for_all_tx_queues(adapter, txo, i) {
2906 q = &txo->q;
2907 if (q->created)
2908 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2909 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002910
Sathya Perla3c8def92011-06-12 20:01:58 +00002911 q = &txo->cq;
2912 if (q->created)
2913 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2914 be_queue_free(adapter, q);
2915 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002916}
2917
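/* Create the TX rings and their CQs; when there are more TX rings than
 * EQs, the TX CQs are distributed across the available EQs.
 */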
Sathya Perla77071332013-08-27 16:57:34 +05302918static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002919{
Sathya Perla73f394e2015-03-26 03:05:09 -04002920 struct be_queue_info *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002921 struct be_tx_obj *txo;
Sathya Perla73f394e2015-03-26 03:05:09 -04002922 struct be_eq_obj *eqo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302923 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002924
Sathya Perlae2617682016-06-22 08:54:54 -04002925 adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
Sathya Perladafc0fe2011-10-24 02:45:02 +00002926
Sathya Perla3c8def92011-06-12 20:01:58 +00002927 for_all_tx_queues(adapter, txo, i) {
2928 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002929 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2930 sizeof(struct be_eth_tx_compl));
2931 if (status)
2932 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002933
John Stultz827da442013-10-07 15:51:58 -07002934 u64_stats_init(&txo->stats.sync);
2935 u64_stats_init(&txo->stats.sync_compl);
2936
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002937 /* If num_evt_qs is less than num_tx_qs, then more than
2938		 * one txq shares an eq
2939 */
Sathya Perla73f394e2015-03-26 03:05:09 -04002940 eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
2941 status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002942 if (status)
2943 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002944
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002945 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2946 sizeof(struct be_eth_wrb));
2947 if (status)
2948 return status;
2949
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002950 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002951 if (status)
2952 return status;
Sathya Perla73f394e2015-03-26 03:05:09 -04002953
2954 netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
2955 eqo->idx);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002956 }
2957
Sathya Perlad3791422012-09-28 04:39:44 +00002958 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2959 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002960 return 0;
2961}
2962
2963static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002964{
2965 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002966 struct be_rx_obj *rxo;
2967 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002968
Sathya Perla3abcded2010-10-03 22:12:27 -07002969 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002970 q = &rxo->cq;
2971 if (q->created)
2972 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2973 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002974 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002975}
2976
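/* Decide the number of RX rings (RSS rings plus an optional default RXQ)
 * and create a CQ for each, spread across the available EQs.
 */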
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002977static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002978{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002979 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002980 struct be_rx_obj *rxo;
2981 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002982
Sathya Perlae2617682016-06-22 08:54:54 -04002983 adapter->num_rss_qs =
2984 min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
Sathya Perla92bf14a2013-08-27 16:57:32 +05302985
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002986	/* We'll use RSS only if at least 2 RSS rings are supported. */
Sathya Perlae2617682016-06-22 08:54:54 -04002987 if (adapter->num_rss_qs < 2)
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002988 adapter->num_rss_qs = 0;
2989
2990 adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
2991
2992 /* When the interface is not capable of RSS rings (and there is no
2993 * need to create a default RXQ) we'll still need one RXQ
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002994 */
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05002995 if (adapter->num_rx_qs == 0)
2996 adapter->num_rx_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302997
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002998 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002999 for_all_rx_queues(adapter, rxo, i) {
3000 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07003001 cq = &rxo->cq;
3002 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05303003 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07003004 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003005 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003006
John Stultz827da442013-10-07 15:51:58 -07003007 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003008 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3009 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07003010 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003011 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07003012 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003013
Sathya Perlad3791422012-09-28 04:39:44 +00003014 dev_info(&adapter->pdev->dev,
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003015 "created %d RX queue(s)\n", adapter->num_rx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003016 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00003017}
3018
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003019static irqreturn_t be_intx(int irq, void *dev)
3020{
Sathya Perlae49cc342012-11-27 19:50:02 +00003021 struct be_eq_obj *eqo = dev;
3022 struct be_adapter *adapter = eqo->adapter;
3023 int num_evts = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003024
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003025 /* IRQ is not expected when NAPI is scheduled as the EQ
3026 * will not be armed.
3027 * But, this can happen on Lancer INTx where it takes
3028	 * a while to de-assert INTx or in BE2 where occasionally
3029 * an interrupt may be raised even when EQ is unarmed.
3030 * If NAPI is already scheduled, then counting & notifying
3031 * events will orphan them.
Sathya Perlae49cc342012-11-27 19:50:02 +00003032 */
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003033 if (napi_schedule_prep(&eqo->napi)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003034 num_evts = events_get(eqo);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003035 __napi_schedule(&eqo->napi);
3036 if (num_evts)
3037 eqo->spurious_intr = 0;
3038 }
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003039 be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
Sathya Perlad0b9cec2013-01-11 22:47:02 +00003040
3041 /* Return IRQ_HANDLED only for the the first spurious intr
3042 * after a valid intr to stop the kernel from branding
3043 * this irq as a bad one!
3044 */
3045 if (num_evts || eqo->spurious_intr++ == 0)
3046 return IRQ_HANDLED;
3047 else
3048 return IRQ_NONE;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003049}
3050
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003051static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003052{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003053 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003054
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003055 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00003056 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003057 return IRQ_HANDLED;
3058}
3059
Sathya Perla2e588f82011-03-11 02:49:26 +00003060static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003061{
Somnath Koture38b1702013-05-29 22:55:56 +00003062 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003063}
3064
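/* Consume up to budget RX completions on this ring, handing each packet
 * to GRO or the regular receive path, and replenish the RXQ when it
 * runs low on posted buffers.
 */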
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003065static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
Eric Dumazetfb6113e2017-02-02 10:16:00 -08003066 int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003067{
Sathya Perla3abcded2010-10-03 22:12:27 -07003068 struct be_adapter *adapter = rxo->adapter;
3069 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00003070 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003071 u32 work_done;
Ajit Khapardec30d7262014-09-12 17:39:16 +05303072 u32 frags_consumed = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003073
3074 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07003075 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003076 if (!rxcp)
3077 break;
3078
Sathya Perla12004ae2011-08-02 19:57:46 +00003079 /* Is it a flush compl that has no data */
3080 if (unlikely(rxcp->num_rcvd == 0))
3081 goto loop_continue;
3082
3083 /* Discard compl with partial DMA Lancer B0 */
3084 if (unlikely(!rxcp->pkt_size)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003085 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003086 goto loop_continue;
Sathya Perla64642812010-12-01 01:04:17 +00003087 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00003088
Sathya Perla12004ae2011-08-02 19:57:46 +00003089 /* On BE drop pkts that arrive due to imperfect filtering in
3090		 * promiscuous mode on some SKUs
3091 */
3092 if (unlikely(rxcp->port != adapter->port_num &&
Sathya Perla748b5392014-05-09 13:29:13 +05303093 !lancer_chip(adapter))) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003094 be_rx_compl_discard(rxo, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003095 goto loop_continue;
3096 }
3097
Eric Dumazetfb6113e2017-02-02 10:16:00 -08003098 if (do_gro(rxcp))
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003099 be_rx_compl_process_gro(rxo, napi, rxcp);
Sathya Perla12004ae2011-08-02 19:57:46 +00003100 else
Sathya Perla6384a4d2013-10-25 10:40:16 +05303101 be_rx_compl_process(rxo, napi, rxcp);
3102
Sathya Perla12004ae2011-08-02 19:57:46 +00003103loop_continue:
Ajit Khapardec30d7262014-09-12 17:39:16 +05303104 frags_consumed += rxcp->num_rcvd;
Sathya Perla2e588f82011-03-11 02:49:26 +00003105 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003106 }
3107
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003108 if (work_done) {
3109 be_cq_notify(adapter, rx_cq->id, true, work_done);
Padmanabh Ratnakar9372cac2011-11-03 01:49:55 +00003110
Sathya Perla6384a4d2013-10-25 10:40:16 +05303111 /* When an rx-obj gets into post_starved state, just
3112 * let be_worker do the posting.
3113 */
3114 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3115 !rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05303116 be_post_rx_frags(rxo, GFP_ATOMIC,
3117 max_t(u32, MAX_RX_POST,
3118 frags_consumed));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003119 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003120
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003121 return work_done;
3122}
3123
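/* Account a BEx/Skyhawk TX completion error status in this ring's stats */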
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303124static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303125{
3126 switch (status) {
3127 case BE_TX_COMP_HDR_PARSE_ERR:
3128 tx_stats(txo)->tx_hdr_parse_err++;
3129 break;
3130 case BE_TX_COMP_NDMA_ERR:
3131 tx_stats(txo)->tx_dma_err++;
3132 break;
3133 case BE_TX_COMP_ACL_ERR:
3134 tx_stats(txo)->tx_spoof_check_err++;
3135 break;
3136 }
3137}
3138
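/* Account a Lancer TX completion error status in this ring's stats */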
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303139static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303140{
3141 switch (status) {
3142 case LANCER_TX_COMP_LSO_ERR:
3143 tx_stats(txo)->tx_tso_err++;
3144 break;
3145 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3146 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3147 tx_stats(txo)->tx_spoof_check_err++;
3148 break;
3149 case LANCER_TX_COMP_QINQ_ERR:
3150 tx_stats(txo)->tx_qinq_err++;
3151 break;
3152 case LANCER_TX_COMP_PARITY_ERR:
3153 tx_stats(txo)->tx_internal_parity_err++;
3154 break;
3155 case LANCER_TX_COMP_DMA_ERR:
3156 tx_stats(txo)->tx_dma_err++;
3157 break;
3158 }
3159}
3160
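/* Reap TX completions on a ring, free the consumed wrbs and wake the
 * netdev TX queue if it was stopped for lack of wrbs.
 */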
Sathya Perlac8f64612014-09-02 09:56:55 +05303161static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3162 int idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003163{
Sathya Perlac8f64612014-09-02 09:56:55 +05303164 int num_wrbs = 0, work_done = 0;
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303165 struct be_tx_compl_info *txcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003166
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303167 while ((txcp = be_tx_compl_get(txo))) {
3168 num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
Sathya Perlac8f64612014-09-02 09:56:55 +05303169 work_done++;
3170
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303171 if (txcp->status) {
Kalesh AP512bb8a2014-09-02 09:56:49 +05303172 if (lancer_chip(adapter))
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303173 lancer_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05303174 else
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303175 be_update_tx_err(txo, txcp->status);
Kalesh AP512bb8a2014-09-02 09:56:49 +05303176 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003177 }
3178
3179 if (work_done) {
3180 be_cq_notify(adapter, txo->cq.id, true, work_done);
3181 atomic_sub(num_wrbs, &txo->q.used);
3182
3183 /* As Tx wrbs have been freed up, wake up netdev queue
3184 * if it was stopped due to lack of tx wrbs. */
3185 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +05303186 be_can_txq_wake(txo)) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003187 netif_wake_subqueue(adapter->netdev, idx);
Sathya Perla3c8def92011-06-12 20:01:58 +00003188 }
Sathya Perla3c8def92011-06-12 20:01:58 +00003189
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003190 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3191 tx_stats(txo)->tx_compl += work_done;
3192 u64_stats_update_end(&tx_stats(txo)->sync_compl);
3193 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003194}
Sathya Perla3c8def92011-06-12 20:01:58 +00003195
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303196int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003197{
3198 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3199 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00003200 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05303201 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303202 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003203 u32 mult_enc = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00003204
Sathya Perla0b545a62012-11-23 00:27:18 +00003205 num_evts = events_get(eqo);
3206
Sathya Perlaa4906ea2014-09-02 09:56:56 +05303207 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3208 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003209
Eric Dumazetfb6113e2017-02-02 10:16:00 -08003210	/* This loop will iterate twice for EQ0, in which
3211	 * completions of the last RXQ (the default one) are also processed.
3212	 * For other EQs the loop iterates only once.
3213	 */
3214 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3215 work = be_process_rx(rxo, napi, budget);
3216 max_work = max(work, max_work);
Sathya Perlaf31e50a2010-03-02 03:56:39 -08003217 }
3218
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003219 if (is_mcc_eqo(eqo))
3220 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003221
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003222 if (max_work < budget) {
Eric Dumazet6ad20162017-01-30 08:22:01 -08003223 napi_complete_done(napi, max_work);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003224
3225 /* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3226 * delay via a delay multiplier encoding value
3227 */
3228 if (skyhawk_chip(adapter))
3229 mult_enc = be_get_eq_delay_mult_enc(eqo);
3230
3231 be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3232 mult_enc);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003233 } else {
3234 /* As we'll continue in polling mode, count and clear events */
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003235 be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00003236 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003237 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003238}
3239
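/* Check the SLIPORT (Lancer) or UE (BEx/Skyhawk) status registers for
 * unrecoverable errors and log any error bits that are set.
 */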
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003240void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003241{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003242 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3243 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003244 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303245 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003246
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303247 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003248 return;
3249
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003250 if (lancer_chip(adapter)) {
3251 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3252 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303253 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003254 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303255 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003256 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303257 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303258 /* Do not log error messages if its a FW reset */
3259 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3260 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3261 dev_info(dev, "Firmware update in progress\n");
3262 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303263 dev_err(dev, "Error detected in the card\n");
3264 dev_err(dev, "ERR: sliport status 0x%x\n",
3265 sliport_status);
3266 dev_err(dev, "ERR: sliport error1 0x%x\n",
3267 sliport_err1);
3268 dev_err(dev, "ERR: sliport error2 0x%x\n",
3269 sliport_err2);
3270 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003271 }
3272 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003273 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3274 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3275 ue_lo_mask = ioread32(adapter->pcicfg +
3276 PCICFG_UE_STATUS_LOW_MASK);
3277 ue_hi_mask = ioread32(adapter->pcicfg +
3278 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003279
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003280 ue_lo = (ue_lo & ~ue_lo_mask);
3281 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003282
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303283 /* On certain platforms BE hardware can indicate spurious UEs.
3284 * Allow HW to stop working completely in case of a real UE.
3285	 * Hence we do not set hw_error on UE detection.
3286 */
3287
3288 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303289			dev_err(dev, "Error detected in the adapter\n");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303290 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303291 be_set_error(adapter, BE_ERROR_UE);
3292
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303293 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3294 if (ue_lo & 1)
3295 dev_err(dev, "UE: %s bit set\n",
3296 ue_status_low_desc[i]);
3297 }
3298 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3299 if (ue_hi & 1)
3300 dev_err(dev, "UE: %s bit set\n",
3301 ue_status_hi_desc[i]);
3302 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303303 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003304 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003305}
3306
Sathya Perla8d56ff12009-11-22 22:02:26 +00003307static void be_msix_disable(struct be_adapter *adapter)
3308{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003309 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003310 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003311 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303312 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003313 }
3314}
3315
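/* Enable MSI-X with enough vectors for the NIC and, when supported, RoCE.
 * On failure, only VFs fail the probe as they cannot fall back to INTx.
 */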
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003316static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003317{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003318 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003319 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003320 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003321
Sathya Perlace7faf02016-06-22 08:54:53 -04003322 /* If RoCE is supported, program the max number of vectors that
3323	 * could be used for NIC and RoCE; otherwise, just program the number
3324 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303325 */
Sathya Perlae2617682016-06-22 08:54:54 -04003326 if (be_roce_supported(adapter)) {
3327 max_roce_eqs =
3328 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3329 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3330 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3331 } else {
3332 num_vec = max(adapter->cfg_num_rx_irqs,
3333 adapter->cfg_num_tx_irqs);
3334 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003335
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003336 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003337 adapter->msix_entries[i].entry = i;
3338
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003339 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3340 MIN_MSIX_VECTORS, num_vec);
3341 if (num_vec < 0)
3342 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003343
Sathya Perla92bf14a2013-08-27 16:57:32 +05303344 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3345 adapter->num_msix_roce_vec = num_vec / 2;
3346 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3347 adapter->num_msix_roce_vec);
3348 }
3349
3350 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3351
3352 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3353 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003354 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003355
3356fail:
3357 dev_warn(dev, "MSIx enable failed\n");
3358
3359 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003360 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003361 return num_vec;
3362 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003363}
3364
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003365static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303366 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003367{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303368 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003369}
3370
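/* Request an IRQ for each EQ and set its CPU affinity hint */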
3371static int be_msix_register(struct be_adapter *adapter)
3372{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003373 struct net_device *netdev = adapter->netdev;
3374 struct be_eq_obj *eqo;
3375 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003376
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003377 for_all_evt_queues(adapter, eqo, i) {
3378 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3379 vec = be_msix_vec_get(adapter, eqo);
3380 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003381 if (status)
3382 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003383
3384 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003385 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003386
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003387 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003388err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303389 for (i--; i >= 0; i--) {
3390 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003391 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303392 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003393 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303394 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003395 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003396 return status;
3397}
3398
3399static int be_irq_register(struct be_adapter *adapter)
3400{
3401 struct net_device *netdev = adapter->netdev;
3402 int status;
3403
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003404 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003405 status = be_msix_register(adapter);
3406 if (status == 0)
3407 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003408 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003409 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003410 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003411 }
3412
Sathya Perlae49cc342012-11-27 19:50:02 +00003413 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003414 netdev->irq = adapter->pdev->irq;
3415 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003416 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003417 if (status) {
3418 dev_err(&adapter->pdev->dev,
3419 "INTx request IRQ failed - err %d\n", status);
3420 return status;
3421 }
3422done:
3423 adapter->isr_registered = true;
3424 return 0;
3425}
3426
3427static void be_irq_unregister(struct be_adapter *adapter)
3428{
3429 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003430 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003431 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003432
3433 if (!adapter->isr_registered)
3434 return;
3435
3436 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003437 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003438 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003439 goto done;
3440 }
3441
3442 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003443 for_all_evt_queues(adapter, eqo, i) {
3444 vec = be_msix_vec_get(adapter, eqo);
3445 irq_set_affinity_hint(vec, NULL);
3446 free_irq(vec, eqo);
3447 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003448
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003449done:
3450 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003451}
3452
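/* Destroy the RX rings after flushing their CQs and reclaiming the
 * posted buffers; RSS is also disabled here.
 */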
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003453static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003454{
Ajit Khaparde62219062016-02-10 22:45:53 +05303455 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00003456 struct be_queue_info *q;
3457 struct be_rx_obj *rxo;
3458 int i;
3459
3460 for_all_rx_queues(adapter, rxo, i) {
3461 q = &rxo->q;
3462 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003463 /* If RXQs are destroyed while in an "out of buffer"
3464 * state, there is a possibility of an HW stall on
3465 * Lancer. So, post 64 buffers to each queue to relieve
3466 * the "out of buffer" condition.
3467 * Make sure there's space in the RXQ before posting.
3468 */
3469 if (lancer_chip(adapter)) {
3470 be_rx_cq_clean(rxo);
3471 if (atomic_read(&q->used) == 0)
3472 be_post_rx_frags(rxo, GFP_KERNEL,
3473 MAX_RX_POST);
3474 }
3475
Sathya Perla482c9e72011-06-29 23:33:17 +00003476 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003477 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003478 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003479 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003480 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003481 }
Ajit Khaparde62219062016-02-10 22:45:53 +05303482
3483 if (rss->rss_flags) {
3484 rss->rss_flags = RSS_ENABLE_NONE;
3485 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3486 128, rss->rss_hkey);
3487 }
Sathya Perla482c9e72011-06-29 23:33:17 +00003488}
3489
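/* ifdown path: remove the MAC/uc/mc filters and, on Lancer, clear the
 * IFACE RX-filter flags.
 */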
Kalesh APbcc84142015-08-05 03:27:48 -04003490static void be_disable_if_filters(struct be_adapter *adapter)
3491{
Ivan Vecera6d928ae2017-01-13 22:38:28 +01003492 /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
3493 if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
Ivan Vecera4993b392017-01-31 20:01:31 +01003494 check_privilege(adapter, BE_PRIV_FILTMGMT)) {
Ivan Vecera6d928ae2017-01-13 22:38:28 +01003495 be_dev_mac_del(adapter, adapter->pmac_id[0]);
Ivan Vecera4993b392017-01-31 20:01:31 +01003496 eth_zero_addr(adapter->dev_mac);
3497 }
Ivan Vecera6d928ae2017-01-13 22:38:28 +01003498
Kalesh APbcc84142015-08-05 03:27:48 -04003499 be_clear_uc_list(adapter);
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04003500 be_clear_mc_list(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003501
3502 /* The IFACE flags are enabled in the open path and cleared
3503 * in the close path. When a VF gets detached from the host and
3504 * assigned to a VM the following happens:
3505 * - VF's IFACE flags get cleared in the detach path
3506 * - IFACE create is issued by the VF in the attach path
3507 * Due to a bug in the BE3/Skyhawk-R FW
3508 * (Lancer FW doesn't have the bug), the IFACE capability flags
3509 * specified along with the IFACE create cmd issued by a VF are not
3510 * honoured by FW. As a consequence, if a *new* driver
3511 * (that enables/disables IFACE flags in open/close)
3512	 * is loaded in the host and an *old* driver is used by a VM/VF,
3513 * the IFACE gets created *without* the needed flags.
3514 * To avoid this, disable RX-filter flags only for Lancer.
3515 */
3516 if (lancer_chip(adapter)) {
3517 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3518 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3519 }
3520}
3521
Sathya Perla889cd4b2010-05-30 23:33:45 +00003522static int be_close(struct net_device *netdev)
3523{
3524 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003525 struct be_eq_obj *eqo;
3526 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003527
Kalesh APe1ad8e32014-04-14 16:12:41 +05303528 /* This protection is needed as be_close() may be called even when the
3529	 * adapter is in a cleared state (after an EEH permanent failure)
3530 */
3531 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3532 return 0;
3533
Sathya Perlab7172412016-07-27 05:26:18 -04003534 /* Before attempting cleanup ensure all the pending cmds in the
3535 * config_wq have finished execution
3536 */
3537 flush_workqueue(be_wq);
3538
Kalesh APbcc84142015-08-05 03:27:48 -04003539 be_disable_if_filters(adapter);
3540
Ivan Veceradff345c52013-11-27 08:59:32 +01003541 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3542 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003543 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303544 }
David S. Miller71237b62013-11-28 18:53:36 -05003545 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003546 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003547
3548 be_async_mcc_disable(adapter);
3549
3550 /* Wait for all pending tx completions to arrive so that
3551 * all tx skbs are freed.
3552 */
Sathya Perlafba87552013-05-08 02:05:50 +00003553 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303554 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003555
3556 be_rx_qs_destroy(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003557
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003558 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003559 if (msix_enabled(adapter))
3560 synchronize_irq(be_msix_vec_get(adapter, eqo));
3561 else
3562 synchronize_irq(netdev->irq);
3563 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003564 }
3565
Sathya Perla889cd4b2010-05-30 23:33:45 +00003566 be_irq_unregister(adapter);
3567
Sathya Perla482c9e72011-06-29 23:33:17 +00003568 return 0;
3569}
3570
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003571static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003572{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003573 struct rss_info *rss = &adapter->rss_info;
3574 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003575 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003576 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003577
3578 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003579 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3580 sizeof(struct be_eth_rx_d));
3581 if (rc)
3582 return rc;
3583 }
3584
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003585 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3586 rxo = default_rxo(adapter);
3587 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3588 rx_frag_size, adapter->if_handle,
3589 false, &rxo->rss_id);
3590 if (rc)
3591 return rc;
3592 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003593
3594 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003595 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003596 rx_frag_size, adapter->if_handle,
3597 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003598 if (rc)
3599 return rc;
3600 }
3601
3602 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003603 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003604 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303605 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003606 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303607 rss->rsstable[j + i] = rxo->rss_id;
3608 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003609 }
3610 }
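		/* Illustrative sketch (queue count assumed, not from this
		 * file): with 4 RSS queues and RSS_INDIR_TABLE_LEN == 128,
		 * the two loops above fill rsstable[] with the rss_ids of
		 * queues 0,1,2,3 repeated 32 times, spreading flows evenly
		 * across the RSS queues.
		 */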
Venkata Duvvurue2557872014-04-21 15:38:00 +05303611 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3612 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003613
3614 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303615 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3616 RSS_ENABLE_UDP_IPV6;
Ajit Khaparde62219062016-02-10 22:45:53 +05303617
3618 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3619 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3620 RSS_INDIR_TABLE_LEN, rss_key);
3621 if (rc) {
3622 rss->rss_flags = RSS_ENABLE_NONE;
3623 return rc;
3624 }
3625
3626 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303627 } else {
3628 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303629 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303630 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003631
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003633 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3634 * which is a queue empty condition
3635 */
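	/* Example (assuming RX_Q_LEN is 1024): only 1023 buffers are posted,
	 * so a completely full ring never has head == tail and cannot be
	 * mistaken for an empty one.
	 */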
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003636 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003637 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3638
Sathya Perla889cd4b2010-05-30 23:33:45 +00003639 return 0;
3640}
3641
Kalesh APbcc84142015-08-05 03:27:48 -04003642static int be_enable_if_filters(struct be_adapter *adapter)
3643{
3644 int status;
3645
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003646 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003647 if (status)
3648 return status;
3649
Ivan Vecera4993b392017-01-31 20:01:31 +01003650	/* Normally this condition is true as the ->dev_mac is zeroed.
3651 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3652 * subsequent be_dev_mac_add() can fail (after fresh boot)
3653 */
3654 if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3655 int old_pmac_id = -1;
3656
3657 /* Remember old programmed MAC if any - can happen on BE3 VF */
3658 if (!is_zero_ether_addr(adapter->dev_mac))
3659 old_pmac_id = adapter->pmac_id[0];
3660
Suresh Reddy988d44b2016-09-07 19:57:52 +05303661 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003662 if (status)
3663 return status;
Ivan Vecera4993b392017-01-31 20:01:31 +01003664
3665 /* Delete the old programmed MAC as we successfully programmed
3666 * a new MAC
3667 */
3668 if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3669 be_dev_mac_del(adapter, old_pmac_id);
3670
Suresh Reddyc27ebf52016-09-07 19:57:53 +05303671 ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
Kalesh APbcc84142015-08-05 03:27:48 -04003672 }
3673
3674 if (adapter->vlans_added)
3675 be_vid_config(adapter);
3676
Sathya Perlab7172412016-07-27 05:26:18 -04003677 __be_set_rx_mode(adapter);
Kalesh APbcc84142015-08-05 03:27:48 -04003678
3679 return 0;
3680}
3681
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003682static int be_open(struct net_device *netdev)
3683{
3684 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003685 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003686 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003687 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003688 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003689 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003690
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003691 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003692 if (status)
3693 goto err;
3694
Kalesh APbcc84142015-08-05 03:27:48 -04003695 status = be_enable_if_filters(adapter);
3696 if (status)
3697 goto err;
3698
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003699 status = be_irq_register(adapter);
3700 if (status)
3701 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003702
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003703 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003704 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003705
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003706 for_all_tx_queues(adapter, txo, i)
3707 be_cq_notify(adapter, txo->cq.id, true, 0);
3708
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003709 be_async_mcc_enable(adapter);
3710
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003711 for_all_evt_queues(adapter, eqo, i) {
3712 napi_enable(&eqo->napi);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003713 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003714 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003715 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003716
Sathya Perla323ff712012-09-28 04:39:43 +00003717 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003718 if (!status)
3719 be_link_status_update(adapter, link_status);
3720
Sathya Perlafba87552013-05-08 02:05:50 +00003721 netif_tx_start_all_queues(netdev);
Sathya Perlac9c47142014-03-27 10:46:19 +05303722 if (skyhawk_chip(adapter))
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07003723 udp_tunnel_get_rx_info(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303724
Sathya Perla889cd4b2010-05-30 23:33:45 +00003725 return 0;
3726err:
3727 be_close(adapter->netdev);
3728 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003729}
3730
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003731static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3732{
3733 u32 addr;
3734
3735 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3736
3737 mac[5] = (u8)(addr & 0xFF);
3738 mac[4] = (u8)((addr >> 8) & 0xFF);
3739 mac[3] = (u8)((addr >> 16) & 0xFF);
3740 /* Use the OUI from the current MAC address */
3741 memcpy(mac, adapter->netdev->dev_addr, 3);
3742}
3743
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003744/*
3745 * Generate a seed MAC address from the PF MAC Address using jhash.
3746 * MAC addresses for VFs are assigned incrementally starting from the seed.
3747 * These addresses are programmed in the ASIC by the PF and the VF driver
3748 * queries for the MAC address during its probe.
3749 */
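/* Worked example (illustrative values only): if the PF MAC is
 * 00:90:fa:12:34:56 and jhash() returns 0xa1b2c3d4, then
 * be_vf_eth_addr_generate() yields the seed MAC 00:90:fa:b2:c3:d4
 * (OUI kept from the PF, low 3 bytes taken from the hash). VF0 is
 * assigned the seed, VF1 gets ...:d5, and so on via the mac[5]
 * increment below.
 */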
Sathya Perla4c876612013-02-03 20:30:11 +00003750static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003751{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003752 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003753 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003754 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003755 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003756
3757 be_vf_eth_addr_generate(adapter, mac);
3758
Sathya Perla11ac75e2011-12-13 00:58:50 +00003759 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303760 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003761 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003762 vf_cfg->if_handle,
3763 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303764 else
3765 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3766 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003767
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003768 if (status)
3769 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303770				"MAC address assignment failed for VF %d\n",
3771 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003772 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003773 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003774
3775 mac[5] += 1;
3776 }
3777 return status;
3778}
3779
Sathya Perla4c876612013-02-03 20:30:11 +00003780static int be_vfs_mac_query(struct be_adapter *adapter)
3781{
3782 int status, vf;
3783 u8 mac[ETH_ALEN];
3784 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003785
3786 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303787 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3788 mac, vf_cfg->if_handle,
3789 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003790 if (status)
3791 return status;
3792 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3793 }
3794 return 0;
3795}
3796
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003797static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003798{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003799 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003800 u32 vf;
3801
Sathya Perla257a3fe2013-06-14 15:54:51 +05303802 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003803 dev_warn(&adapter->pdev->dev,
3804 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003805 goto done;
3806 }
3807
Sathya Perlab4c1df92013-05-08 02:05:47 +00003808 pci_disable_sriov(adapter->pdev);
3809
Sathya Perla11ac75e2011-12-13 00:58:50 +00003810 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303811 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003812 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3813 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303814 else
3815 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3816 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003817
Sathya Perla11ac75e2011-12-13 00:58:50 +00003818 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3819 }
Somnath Kotur884476b2016-06-22 08:54:55 -04003820
3821 if (BE3_chip(adapter))
3822 be_cmd_set_hsw_config(adapter, 0, 0,
3823 adapter->if_handle,
3824 PORT_FWD_TYPE_PASSTHRU, 0);
Sathya Perla39f1d942012-05-08 19:41:24 +00003825done:
3826 kfree(adapter->vf_cfg);
3827 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303828 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003829}
3830
Sathya Perla77071332013-08-27 16:57:34 +05303831static void be_clear_queues(struct be_adapter *adapter)
3832{
3833 be_mcc_queues_destroy(adapter);
3834 be_rx_cqs_destroy(adapter);
3835 be_tx_queues_destroy(adapter);
3836 be_evt_queues_destroy(adapter);
3837}
3838
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303839static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003840{
Sathya Perla191eb752012-02-23 18:50:13 +00003841 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3842 cancel_delayed_work_sync(&adapter->work);
3843 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3844 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303845}
3846
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003847static void be_cancel_err_detection(struct be_adapter *adapter)
3848{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303849 struct be_error_recovery *err_rec = &adapter->error_recovery;
3850
3851 if (!be_err_recovery_workq)
3852 return;
3853
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003854 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303855 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003856 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3857 }
3858}
3859
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05303860static int be_enable_vxlan_offloads(struct be_adapter *adapter)
3861{
3862 struct net_device *netdev = adapter->netdev;
3863 struct device *dev = &adapter->pdev->dev;
3864 struct be_vxlan_port *vxlan_port;
3865 __be16 port;
3866 int status;
3867
3868 vxlan_port = list_first_entry(&adapter->vxlan_port_list,
3869 struct be_vxlan_port, list);
3870 port = vxlan_port->port;
3871
3872 status = be_cmd_manage_iface(adapter, adapter->if_handle,
3873 OP_CONVERT_NORMAL_TO_TUNNEL);
3874 if (status) {
3875 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
3876 return status;
3877 }
3878 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
3879
3880 status = be_cmd_set_vxlan_port(adapter, port);
3881 if (status) {
3882 dev_warn(dev, "Failed to add VxLAN port\n");
3883 return status;
3884 }
3885 adapter->vxlan_port = port;
3886
3887 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3888 NETIF_F_TSO | NETIF_F_TSO6 |
3889 NETIF_F_GSO_UDP_TUNNEL;
3890 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
3891 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
3892
3893 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
3894 be16_to_cpu(port));
3895 return 0;
3896}
3897
Sathya Perlac9c47142014-03-27 10:46:19 +05303898static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3899{
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003900 struct net_device *netdev = adapter->netdev;
3901
Sathya Perlac9c47142014-03-27 10:46:19 +05303902 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3903 be_cmd_manage_iface(adapter, adapter->if_handle,
3904 OP_CONVERT_TUNNEL_TO_NORMAL);
3905
3906 if (adapter->vxlan_port)
3907 be_cmd_set_vxlan_port(adapter, 0);
3908
3909 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3910 adapter->vxlan_port = 0;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05003911
3912 netdev->hw_enc_features = 0;
3913 netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05303914 netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
Sathya Perlac9c47142014-03-27 10:46:19 +05303915}
3916
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003917static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
3918 struct be_resources *vft_res)
Vasundhara Volamf2858732015-03-04 00:44:33 -05003919{
3920 struct be_resources res = adapter->pool_res;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003921 u32 vf_if_cap_flags = res.vf_if_cap_flags;
3922 struct be_resources res_mod = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05003923 u16 num_vf_qs = 1;
3924
Somnath Koturde2b1e02016-06-06 07:22:10 -04003925	/* Distribute the queue resources among the PF and its VFs */
3926 if (num_vfs) {
3927 /* Divide the rx queues evenly among the VFs and the PF, capped
3928 * at VF-EQ-count. Any remainder queues belong to the PF.
3929 */
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05303930 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3931 res.max_rss_qs / (num_vfs + 1));
Vasundhara Volamf2858732015-03-04 00:44:33 -05003932
Somnath Koturde2b1e02016-06-06 07:22:10 -04003933 /* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
3934 * RSS Tables per port. Provide RSS on VFs, only if number of
3935		 * VFs requested is less than its PF Pool's RSS Tables limit.
Vasundhara Volamf2858732015-03-04 00:44:33 -05003936 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04003937 if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
Vasundhara Volamf2858732015-03-04 00:44:33 -05003938 num_vf_qs = 1;
3939 }
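	/* Worked example (illustrative numbers): with res.max_rss_qs == 16
	 * and num_vfs == 7, each function gets 16 / (7 + 1) == 2 RX/RSS
	 * queues (capped at SH_VF_MAX_NIC_EQS); if num_vfs were >= the PF
	 * pool's RSS-tables limit, every VF would fall back to a single
	 * queue pair.
	 */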
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003940
3941 /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
3942 * which are modifiable using SET_PROFILE_CONFIG cmd.
3943 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04003944 be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
3945 RESOURCE_MODIFIABLE, 0);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04003946
3947 /* If RSS IFACE capability flags are modifiable for a VF, set the
3948 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
3949 * more than 1 RSSQ is available for a VF.
3950 * Otherwise, provision only 1 queue pair for VF.
3951 */
3952 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
3953 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3954 if (num_vf_qs > 1) {
3955 vf_if_cap_flags |= BE_IF_FLAGS_RSS;
3956 if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
3957 vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
3958 } else {
3959 vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
3960 BE_IF_FLAGS_DEFQ_RSS);
3961 }
3962 } else {
3963 num_vf_qs = 1;
3964 }
3965
3966 if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
3967 vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
3968 vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3969 }
3970
3971 vft_res->vf_if_cap_flags = vf_if_cap_flags;
3972 vft_res->max_rx_qs = num_vf_qs;
3973 vft_res->max_rss_qs = num_vf_qs;
3974 vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
3975 vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
3976
3977 /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
3978	 * among the PF and its VFs, if the fields are changeable
3979 */
3980 if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
3981 vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
3982
3983 if (res_mod.max_vlans == FIELD_MODIFIABLE)
3984 vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
3985
3986 if (res_mod.max_iface_count == FIELD_MODIFIABLE)
3987 vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
3988
3989 if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
3990 vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
Vasundhara Volamf2858732015-03-04 00:44:33 -05003991}
3992
Sathya Perlab7172412016-07-27 05:26:18 -04003993static void be_if_destroy(struct be_adapter *adapter)
3994{
3995 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3996
3997 kfree(adapter->pmac_id);
3998 adapter->pmac_id = NULL;
3999
4000 kfree(adapter->mc_list);
4001 adapter->mc_list = NULL;
4002
4003 kfree(adapter->uc_list);
4004 adapter->uc_list = NULL;
4005}
4006
Somnath Koturb05004a2013-12-05 12:08:16 +05304007static int be_clear(struct be_adapter *adapter)
4008{
Vasundhara Volamf2858732015-03-04 00:44:33 -05004009 struct pci_dev *pdev = adapter->pdev;
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004010 struct be_resources vft_res = {0};
Vasundhara Volamf2858732015-03-04 00:44:33 -05004011
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304012 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00004013
Sathya Perlab7172412016-07-27 05:26:18 -04004014 flush_workqueue(be_wq);
4015
Sathya Perla11ac75e2011-12-13 00:58:50 +00004016 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004017 be_vf_clear(adapter);
4018
Vasundhara Volambec84e62014-06-30 13:01:32 +05304019 /* Re-configure FW to distribute resources evenly across max-supported
4020 * number of VFs, only when VFs are not already enabled.
4021 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05004022 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4023 !pci_vfs_assigned(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004024 be_calculate_vf_res(adapter,
4025 pci_sriov_get_totalvfs(pdev),
4026 &vft_res);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304027 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004028 pci_sriov_get_totalvfs(pdev),
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004029 &vft_res);
Vasundhara Volamf2858732015-03-04 00:44:33 -05004030 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304031
Sathya Perlac9c47142014-03-27 10:46:19 +05304032 be_disable_vxlan_offloads(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00004033
Sathya Perlab7172412016-07-27 05:26:18 -04004034 be_if_destroy(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004035
Sathya Perla77071332013-08-27 16:57:34 +05304036 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004037
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004038 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304039 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004040 return 0;
4041}
4042
Sathya Perla4c876612013-02-03 20:30:11 +00004043static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004044{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304045 struct be_resources res = {0};
Kalesh APbcc84142015-08-05 03:27:48 -04004046 u32 cap_flags, en_flags, vf;
Sathya Perla4c876612013-02-03 20:30:11 +00004047 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05004048 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004049
Kalesh AP0700d812015-01-20 03:51:43 -05004050 /* If a FW profile exists, then cap_flags are updated */
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004051 cap_flags = BE_VF_IF_EN_FLAGS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004052
Sathya Perla4c876612013-02-03 20:30:11 +00004053 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304054 if (!BE3_chip(adapter)) {
Somnath Koturde2b1e02016-06-06 07:22:10 -04004055 status = be_cmd_get_profile_config(adapter, &res, NULL,
4056 ACTIVE_PROFILE_TYPE,
Vasundhara Volamf2858732015-03-04 00:44:33 -05004057 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05304058 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004059 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304060 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04004061 /* Prevent VFs from enabling VLAN promiscuous
4062 * mode
4063 */
4064 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4065 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304066 }
Sathya Perla4c876612013-02-03 20:30:11 +00004067
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05004068 /* PF should enable IF flags during proxy if_create call */
4069 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
Kalesh APbcc84142015-08-05 03:27:48 -04004070 status = be_cmd_if_create(adapter, cap_flags, en_flags,
4071 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00004072 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05004073 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00004074 }
Kalesh AP0700d812015-01-20 03:51:43 -05004075
4076 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004077}
4078
Sathya Perla39f1d942012-05-08 19:41:24 +00004079static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004080{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004081 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004082 int vf;
4083
Sathya Perla39f1d942012-05-08 19:41:24 +00004084 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4085 GFP_KERNEL);
4086 if (!adapter->vf_cfg)
4087 return -ENOMEM;
4088
Sathya Perla11ac75e2011-12-13 00:58:50 +00004089 for_all_vfs(adapter, vf_cfg, vf) {
4090 vf_cfg->if_handle = -1;
4091 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004092 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004093 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004094}
4095
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004096static int be_vf_setup(struct be_adapter *adapter)
4097{
Sathya Perla4c876612013-02-03 20:30:11 +00004098 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05304099 struct be_vf_cfg *vf_cfg;
4100 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04004101 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004102
Sathya Perla257a3fe2013-06-14 15:54:51 +05304103 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00004104
4105 status = be_vf_setup_init(adapter);
4106 if (status)
4107 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00004108
Sathya Perla4c876612013-02-03 20:30:11 +00004109 if (old_vfs) {
4110 for_all_vfs(adapter, vf_cfg, vf) {
4111 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4112 if (status)
4113 goto err;
4114 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004115
Sathya Perla4c876612013-02-03 20:30:11 +00004116 status = be_vfs_mac_query(adapter);
4117 if (status)
4118 goto err;
4119 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05304120 status = be_vfs_if_create(adapter);
4121 if (status)
4122 goto err;
4123
Sathya Perla39f1d942012-05-08 19:41:24 +00004124 status = be_vf_eth_addr_config(adapter);
4125 if (status)
4126 goto err;
4127 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004128
Sathya Perla11ac75e2011-12-13 00:58:50 +00004129 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05304130		/* Allow VFs to program MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04004131 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4132 vf + 1);
4133 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05304134 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04004135 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05304136 BE_PRIV_FILTMGMT,
4137 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004138 if (!status) {
4139 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05304140 dev_info(dev, "VF%d has FILTMGMT privilege\n",
4141 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04004142 }
Sathya Perla04a06022013-07-23 15:25:00 +05304143 }
4144
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05304145 /* Allow full available bandwidth */
4146 if (!old_vfs)
4147 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00004148
Kalesh APe7bcbd72015-05-06 05:30:32 -04004149 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4150 vf_cfg->if_handle, NULL,
4151 &spoofchk);
4152 if (!status)
4153 vf_cfg->spoofchk = spoofchk;
4154
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304155 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05304156 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304157 be_cmd_set_logical_link_config(adapter,
4158 IFLA_VF_LINK_STATE_AUTO,
4159 vf+1);
4160 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004161 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00004162
4163 if (!old_vfs) {
4164 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4165 if (status) {
4166 dev_err(dev, "SRIOV enable failed\n");
4167 adapter->num_vfs = 0;
4168 goto err;
4169 }
4170 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304171
Somnath Kotur884476b2016-06-22 08:54:55 -04004172 if (BE3_chip(adapter)) {
4173 /* On BE3, enable VEB only when SRIOV is enabled */
4174 status = be_cmd_set_hsw_config(adapter, 0, 0,
4175 adapter->if_handle,
4176 PORT_FWD_TYPE_VEB, 0);
4177 if (status)
4178 goto err;
4179 }
4180
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05304181 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004182 return 0;
4183err:
Sathya Perla4c876612013-02-03 20:30:11 +00004184 dev_err(dev, "VF setup failed\n");
4185 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004186 return status;
4187}
4188
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304189/* Converting function_mode bits on BE3 to SH mc_type enums */
4190
4191static u8 be_convert_mc_type(u32 function_mode)
4192{
Suresh Reddy66064db2014-06-23 16:41:29 +05304193 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304194 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304195 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304196 return FLEX10;
4197 else if (function_mode & VNIC_MODE)
4198 return vNIC2;
4199 else if (function_mode & UMC_ENABLED)
4200 return UMC;
4201 else
4202 return MC_NONE;
4203}
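/* For example, a function_mode with both VNIC_MODE and QNQ_MODE set maps
 * to vNIC1, while QNQ_MODE alone maps to FLEX10; the order of the checks
 * above matters because these mode bits can be set in combination.
 */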
4204
Sathya Perla92bf14a2013-08-27 16:57:32 +05304205/* On BE2/BE3 FW does not suggest the supported limits */
4206static void BEx_get_resources(struct be_adapter *adapter,
4207 struct be_resources *res)
4208{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304209 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304210
4211 if (be_physfn(adapter))
4212 res->max_uc_mac = BE_UC_PMAC_COUNT;
4213 else
4214 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4215
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304216 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4217
4218 if (be_is_mc(adapter)) {
4219 /* Assuming that there are 4 channels per port,
4220 * when multi-channel is enabled
4221 */
4222 if (be_is_qnq_mode(adapter))
4223 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4224 else
4225 /* In a non-qnq multichannel mode, the pvid
4226 * takes up one vlan entry
4227 */
4228 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4229 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304230 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304231 }
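	/* Example (assuming BE_NUM_VLANS_SUPPORTED is 64): a QnQ
	 * multi-channel function gets 64 / 8 == 8 VLAN filters, a non-QnQ
	 * multi-channel function gets 64 / 4 - 1 == 15 (one entry is
	 * consumed by the pvid), and a single-channel function gets all 64.
	 */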
4232
Sathya Perla92bf14a2013-08-27 16:57:32 +05304233 res->max_mcast_mac = BE_MAX_MC;
4234
Vasundhara Volama5243da2014-03-11 18:53:07 +05304235 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4236 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4237 * *only* if it is RSS-capable.
4238 */
4239 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004240 be_virtfn(adapter) ||
4241 (be_is_mc(adapter) &&
4242 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304243 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304244 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4245 struct be_resources super_nic_res = {0};
4246
4247 /* On a SuperNIC profile, the driver needs to use the
4248 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4249 */
Somnath Koturde2b1e02016-06-06 07:22:10 -04004250 be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4251 ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4252 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304253 /* Some old versions of BE3 FW don't report max_tx_qs value */
4254 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4255 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304256 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304257 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304258
4259 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4260 !use_sriov && be_physfn(adapter))
4261 res->max_rss_qs = (adapter->be3_native) ?
4262 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4263 res->max_rx_qs = res->max_rss_qs + 1;
4264
Suresh Reddye3dc8672014-01-06 13:02:25 +05304265 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304266 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304267 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4268 else
4269 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304270
4271 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004272 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304273 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4274 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4275}
4276
Sathya Perla30128032011-11-10 19:17:57 +00004277static void be_setup_init(struct be_adapter *adapter)
4278{
4279 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004280 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004281 adapter->if_handle = -1;
4282 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004283 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304284 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004285 if (be_physfn(adapter))
4286 adapter->cmd_privileges = MAX_PRIVILEGES;
4287 else
4288 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004289}
4290
Somnath Koturde2b1e02016-06-06 07:22:10 -04004291/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4292 * However, this HW limitation is not exposed to the host via any SLI cmd.
4293 * As a result, in the case of SRIOV, and in particular in multi-partition
4294 * configs, the driver needs to calculate a proportional share of RSS Tables
4295 * per PF-pool for distribution between the VFs. This self-imposed limit
4296 * determines the number of VFs for which RSS can be enabled.
4297 */
Baoyou Xied766e7e2016-09-18 16:35:29 +08004298static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
Somnath Koturde2b1e02016-06-06 07:22:10 -04004299{
4300 struct be_port_resources port_res = {0};
4301 u8 rss_tables_on_port;
4302 u16 max_vfs = be_max_vfs(adapter);
4303
4304 be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4305 RESOURCE_LIMITS, 0);
4306
4307 rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4308
4309 /* Each PF Pool's RSS Tables limit =
4310 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4311 */
4312 adapter->pool_res.max_rss_tables =
4313 max_vfs * rss_tables_on_port / port_res.max_vfs;
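	/* Worked example (illustrative values; MAX_PORT_RSS_TABLES is
	 * assumed to be 15): with 1 NIC PF on the port, rss_tables_on_port
	 * is 14; a PF with max_vfs == 32 on a port with max_vfs == 64 gets
	 * 32 * 14 / 64 == 7 RSS tables for its pool.
	 */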
4314}
4315
Vasundhara Volambec84e62014-06-30 13:01:32 +05304316static int be_get_sriov_config(struct be_adapter *adapter)
4317{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304318 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304319 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304320
Somnath Koturde2b1e02016-06-06 07:22:10 -04004321 be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4322 RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304323
Vasundhara Volamace40af2015-03-04 00:44:34 -05004324 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304325 if (BE3_chip(adapter) && !res.max_vfs) {
4326 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4327 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4328 }
4329
Sathya Perlad3d18312014-08-01 17:47:30 +05304330 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304331
Vasundhara Volamace40af2015-03-04 00:44:34 -05004332 /* If during previous unload of the driver, the VFs were not disabled,
4333 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4334 * Instead use the TotalVFs value stored in the pci-dev struct.
4335 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304336 old_vfs = pci_num_vf(adapter->pdev);
4337 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004338 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4339 old_vfs);
4340
4341 adapter->pool_res.max_vfs =
4342 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304343 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304344 }
4345
Somnath Koturde2b1e02016-06-06 07:22:10 -04004346 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4347 be_calculate_pf_pool_rss_tables(adapter);
4348 dev_info(&adapter->pdev->dev,
4349 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4350 be_max_pf_pool_rss_tables(adapter));
4351 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304352 return 0;
4353}
4354
Vasundhara Volamace40af2015-03-04 00:44:34 -05004355static void be_alloc_sriov_res(struct be_adapter *adapter)
4356{
4357 int old_vfs = pci_num_vf(adapter->pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004358 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05004359 int status;
4360
4361 be_get_sriov_config(adapter);
4362
4363 if (!old_vfs)
4364 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4365
4366 /* When the HW is in SRIOV capable configuration, the PF-pool
4367 * resources are given to PF during driver load, if there are no
4368 * old VFs. This facility is not available in BE3 FW.
4369 * Also, this is done by FW in Lancer chip.
4370 */
4371 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004372 be_calculate_vf_res(adapter, 0, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004373 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04004374 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05004375 if (status)
4376 dev_err(&adapter->pdev->dev,
4377 "Failed to optimize SRIOV resources\n");
4378 }
4379}
4380
Sathya Perla92bf14a2013-08-27 16:57:32 +05304381static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004382{
Sathya Perla92bf14a2013-08-27 16:57:32 +05304383 struct device *dev = &adapter->pdev->dev;
4384 struct be_resources res = {0};
4385 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004386
Sathya Perla92bf14a2013-08-27 16:57:32 +05304387 /* For Lancer, SH etc read per-function resource limits from FW.
4388 * GET_FUNC_CONFIG returns per function guaranteed limits.
4389	 * GET_PROFILE_CONFIG returns PCI-E related PF-pool limits
4390 */
Sathya Perlace7faf02016-06-22 08:54:53 -04004391 if (BEx_chip(adapter)) {
4392 BEx_get_resources(adapter, &res);
4393 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304394 status = be_cmd_get_func_config(adapter, &res);
4395 if (status)
4396 return status;
4397
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004398		/* If a default RXQ must be created, we'll use up one RSSQ */
4399 if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4400 !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4401 res.max_rss_qs -= 1;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004402 }
4403
Sathya Perlace7faf02016-06-22 08:54:53 -04004404 /* If RoCE is supported stash away half the EQs for RoCE */
4405 res.max_nic_evt_qs = be_roce_supported(adapter) ?
4406 res.max_evt_qs / 2 : res.max_evt_qs;
4407 adapter->res = res;
4408
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004409 /* If FW supports RSS default queue, then skip creating non-RSS
4410 * queue for non-IP traffic.
4411 */
4412 adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4413 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4414
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304415 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4416 be_max_txqs(adapter), be_max_rxqs(adapter),
Sathya Perlace7faf02016-06-22 08:54:53 -04004417 be_max_rss(adapter), be_max_nic_eqs(adapter),
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304418 be_max_vfs(adapter));
4419 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4420 be_max_uc(adapter), be_max_mc(adapter),
4421 be_max_vlans(adapter));
4422
Sathya Perlae2617682016-06-22 08:54:54 -04004423 /* Ensure RX and TX queues are created in pairs at init time */
4424 adapter->cfg_num_rx_irqs =
4425 min_t(u16, netif_get_num_default_rss_queues(),
4426 be_max_qp_irqs(adapter));
4427 adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304428 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004429}
4430
Sathya Perla39f1d942012-05-08 19:41:24 +00004431static int be_get_config(struct be_adapter *adapter)
4432{
Sathya Perla6b085ba2015-02-23 04:20:09 -05004433 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05304434 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05004435
Suresh Reddy980df242015-12-30 01:29:03 -05004436 status = be_cmd_get_cntl_attributes(adapter);
4437 if (status)
4438 return status;
4439
Kalesh APe97e3cd2014-07-17 16:20:26 +05304440 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004441 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304442 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004443
Venkat Duvvurufd7ff6f2015-12-30 01:29:04 -05004444 if (!lancer_chip(adapter) && be_physfn(adapter))
4445 be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4446
Sathya Perla6b085ba2015-02-23 04:20:09 -05004447 if (BEx_chip(adapter)) {
4448 level = be_cmd_get_fw_log_level(adapter);
4449 adapter->msg_enable =
4450 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4451 }
4452
4453 be_cmd_get_acpi_wol_cap(adapter);
Sriharsha Basavapatna45f13df2016-06-06 07:22:09 -04004454 pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4455 pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
Sathya Perla6b085ba2015-02-23 04:20:09 -05004456
Vasundhara Volam21252372015-02-06 08:18:42 -05004457 be_cmd_query_port_name(adapter);
4458
4459 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05304460 status = be_cmd_get_active_profile(adapter, &profile_id);
4461 if (!status)
4462 dev_info(&adapter->pdev->dev,
4463 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05304464 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304465
Sathya Perla92bf14a2013-08-27 16:57:32 +05304466 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00004467}
4468
Sathya Perla95046b92013-07-23 15:25:02 +05304469static int be_mac_setup(struct be_adapter *adapter)
4470{
4471 u8 mac[ETH_ALEN];
4472 int status;
4473
4474 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4475 status = be_cmd_get_perm_mac(adapter, mac);
4476 if (status)
4477 return status;
4478
4479 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4480 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Ivan Vecera4993b392017-01-31 20:01:31 +01004481
4482 /* Initial MAC for BE3 VFs is already programmed by PF */
4483 if (BEx_chip(adapter) && be_virtfn(adapter))
4484 memcpy(adapter->dev_mac, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304485 }
4486
Sathya Perla95046b92013-07-23 15:25:02 +05304487 return 0;
4488}
4489
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304490static void be_schedule_worker(struct be_adapter *adapter)
4491{
Sathya Perlab7172412016-07-27 05:26:18 -04004492 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304493 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4494}
4495
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304496static void be_destroy_err_recovery_workq(void)
4497{
4498 if (!be_err_recovery_workq)
4499 return;
4500
4501 flush_workqueue(be_err_recovery_workq);
4502 destroy_workqueue(be_err_recovery_workq);
4503 be_err_recovery_workq = NULL;
4504}
4505
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304506static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004507{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304508 struct be_error_recovery *err_rec = &adapter->error_recovery;
4509
4510 if (!be_err_recovery_workq)
4511 return;
4512
4513 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4514 msecs_to_jiffies(delay));
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004515 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4516}
4517
Sathya Perla77071332013-08-27 16:57:34 +05304518static int be_setup_queues(struct be_adapter *adapter)
4519{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304520 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304521 int status;
4522
4523 status = be_evt_queues_create(adapter);
4524 if (status)
4525 goto err;
4526
4527 status = be_tx_qs_create(adapter);
4528 if (status)
4529 goto err;
4530
4531 status = be_rx_cqs_create(adapter);
4532 if (status)
4533 goto err;
4534
4535 status = be_mcc_queues_create(adapter);
4536 if (status)
4537 goto err;
4538
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304539 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4540 if (status)
4541 goto err;
4542
4543 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4544 if (status)
4545 goto err;
4546
Sathya Perla77071332013-08-27 16:57:34 +05304547 return 0;
4548err:
4549 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4550 return status;
4551}
4552
Ajit Khaparde62219062016-02-10 22:45:53 +05304553static int be_if_create(struct be_adapter *adapter)
4554{
4555 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4556 u32 cap_flags = be_if_cap_flags(adapter);
4557 int status;
4558
Sathya Perlab7172412016-07-27 05:26:18 -04004559 /* alloc required memory for other filtering fields */
4560 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4561 sizeof(*adapter->pmac_id), GFP_KERNEL);
4562 if (!adapter->pmac_id)
4563 return -ENOMEM;
4564
4565 adapter->mc_list = kcalloc(be_max_mc(adapter),
4566 sizeof(*adapter->mc_list), GFP_KERNEL);
4567 if (!adapter->mc_list)
4568 return -ENOMEM;
4569
4570 adapter->uc_list = kcalloc(be_max_uc(adapter),
4571 sizeof(*adapter->uc_list), GFP_KERNEL);
4572 if (!adapter->uc_list)
4573 return -ENOMEM;
4574
Sathya Perlae2617682016-06-22 08:54:54 -04004575 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304576 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4577
4578 en_flags &= cap_flags;
4579 /* will enable all the needed filter flags in be_open() */
4580 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4581 &adapter->if_handle, 0);
4582
Sathya Perlab7172412016-07-27 05:26:18 -04004583 if (status)
4584 return status;
4585
4586 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304587}
4588
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304589int be_update_queues(struct be_adapter *adapter)
4590{
4591 struct net_device *netdev = adapter->netdev;
4592 int status;
4593
4594 if (netif_running(netdev))
4595 be_close(netdev);
4596
4597 be_cancel_worker(adapter);
4598
4599 /* If any vectors have been shared with RoCE we cannot re-program
4600 * the MSIx table.
4601 */
4602 if (!adapter->num_msix_roce_vec)
4603 be_msix_disable(adapter);
4604
4605 be_clear_queues(adapter);
Ajit Khaparde62219062016-02-10 22:45:53 +05304606 status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4607 if (status)
4608 return status;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304609
4610 if (!msix_enabled(adapter)) {
4611 status = be_msix_enable(adapter);
4612 if (status)
4613 return status;
4614 }
4615
Ajit Khaparde62219062016-02-10 22:45:53 +05304616 status = be_if_create(adapter);
4617 if (status)
4618 return status;
4619
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304620 status = be_setup_queues(adapter);
4621 if (status)
4622 return status;
4623
4624 be_schedule_worker(adapter);
4625
4626 if (netif_running(netdev))
4627 status = be_open(netdev);
4628
4629 return status;
4630}
4631
Sathya Perlaf7062ee2015-02-06 08:18:35 -05004632static inline int fw_major_num(const char *fw_ver)
4633{
4634 int fw_major = 0, i;
4635
4636 i = sscanf(fw_ver, "%d.", &fw_major);
4637 if (i != 1)
4638 return 0;
4639
4640 return fw_major;
4641}
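/* For example, fw_major_num("10.6.0.3") returns 10, while a version
 * string that does not start with "<number>." makes sscanf() report no
 * matches and the function returns a major version of 0.
 */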
4642
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304643/* If it is error recovery, FLR the PF
4644 * Else, if any VFs are already enabled, don't FLR the PF
4645 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004646static bool be_reset_required(struct be_adapter *adapter)
4647{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304648 if (be_error_recovering(adapter))
4649 return true;
4650 else
4651 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004652}
4653
4654/* Wait for the FW to be ready and perform the required initialization */
4655static int be_func_init(struct be_adapter *adapter)
4656{
4657 int status;
4658
4659 status = be_fw_wait_ready(adapter);
4660 if (status)
4661 return status;
4662
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304663 /* FW is now ready; clear errors to allow cmds/doorbell */
4664 be_clear_error(adapter, BE_CLEAR_ALL);
4665
Sathya Perlaf962f842015-02-23 04:20:16 -05004666 if (be_reset_required(adapter)) {
4667 status = be_cmd_reset_function(adapter);
4668 if (status)
4669 return status;
4670
4671 /* Wait for interrupts to quiesce after an FLR */
4672 msleep(100);
Sathya Perlaf962f842015-02-23 04:20:16 -05004673 }
4674
4675 /* Tell FW we're ready to fire cmds */
4676 status = be_cmd_fw_init(adapter);
4677 if (status)
4678 return status;
4679
4680 /* Allow interrupts for other ULPs running on NIC function */
4681 be_intr_set(adapter, true);
4682
4683 return 0;
4684}
4685
Sathya Perla5fb379e2009-06-18 00:02:59 +00004686static int be_setup(struct be_adapter *adapter)
4687{
Sathya Perla39f1d942012-05-08 19:41:24 +00004688 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004689 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004690
Sathya Perlaf962f842015-02-23 04:20:16 -05004691 status = be_func_init(adapter);
4692 if (status)
4693 return status;
4694
Sathya Perla30128032011-11-10 19:17:57 +00004695 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004696
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004697 if (!lancer_chip(adapter))
4698 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004699
Suresh Reddy980df242015-12-30 01:29:03 -05004700 /* invoke this cmd first to get pf_num and vf_num which are needed
4701 * for issuing profile related cmds
4702 */
4703 if (!BEx_chip(adapter)) {
4704 status = be_cmd_get_func_config(adapter, NULL);
4705 if (status)
4706 return status;
4707 }
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004708
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004709 status = be_get_config(adapter);
4710 if (status)
4711 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004712
Somnath Koturde2b1e02016-06-06 07:22:10 -04004713 if (!BE2_chip(adapter) && be_physfn(adapter))
4714 be_alloc_sriov_res(adapter);
4715
4716 status = be_get_resources(adapter);
4717 if (status)
4718 goto err;
4719
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004720 status = be_msix_enable(adapter);
4721 if (status)
4722 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004723
Kalesh APbcc84142015-08-05 03:27:48 -04004724 /* will enable all the needed filter flags in be_open() */
Ajit Khaparde62219062016-02-10 22:45:53 +05304725 status = be_if_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004726 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004727 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004728
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304729 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4730 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304731 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304732 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004733 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004734 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004735
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004736 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004737
Sathya Perla95046b92013-07-23 15:25:02 +05304738 status = be_mac_setup(adapter);
4739 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004740 goto err;
4741
Kalesh APe97e3cd2014-07-17 16:20:26 +05304742 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304743 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004744
Somnath Koture9e2a902013-10-24 14:37:53 +05304745 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304746		dev_err(dev, "Firmware on card is old (%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304747 adapter->fw_ver);
4748 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4749 }
4750
Kalesh AP00d594c2015-01-20 03:51:44 -05004751 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4752 adapter->rx_fc);
4753 if (status)
4754 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4755 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004756
Kalesh AP00d594c2015-01-20 03:51:44 -05004757 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4758 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004759
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304760 if (be_physfn(adapter))
4761 be_cmd_set_logical_link_config(adapter,
4762 IFLA_VF_LINK_STATE_AUTO, 0);
4763
Somnath Kotur884476b2016-06-22 08:54:55 -04004764	/* BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
 4765	 * confusing a Linux bridge or OVS that it might be connected to.
 4766	 * Set the EVB to PASSTHRU mode, which effectively disables the EVB
 4767	 * when SR-IOV is not enabled.
4768 */
4769 if (BE3_chip(adapter))
4770 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4771 PORT_FWD_TYPE_PASSTHRU, 0);
4772
Vasundhara Volambec84e62014-06-30 13:01:32 +05304773 if (adapter->num_vfs)
4774 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004775
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004776 status = be_cmd_get_phy_info(adapter);
4777 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004778 adapter->phy.fc_autoneg = 1;
4779
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304780 if (be_physfn(adapter) && !lancer_chip(adapter))
4781 be_cmd_set_features(adapter);
4782
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304783 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304784 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004785 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004786err:
4787 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004788 return status;
4789}
4790
Ivan Vecera66268732011-12-08 01:31:21 +00004791#ifdef CONFIG_NET_POLL_CONTROLLER
4792static void be_netpoll(struct net_device *netdev)
4793{
4794 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004795 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004796 int i;
4797
Sathya Perlae49cc342012-11-27 19:50:02 +00004798 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04004799 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perlae49cc342012-11-27 19:50:02 +00004800 napi_schedule(&eqo->napi);
4801 }
Ivan Vecera66268732011-12-08 01:31:21 +00004802}
4803#endif
4804
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004805int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4806{
4807 const struct firmware *fw;
4808 int status;
4809
4810 if (!netif_running(adapter->netdev)) {
4811 dev_err(&adapter->pdev->dev,
4812 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304813 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004814 }
4815
4816 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4817 if (status)
4818 goto fw_exit;
4819
4820 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4821
4822 if (lancer_chip(adapter))
4823 status = lancer_fw_download(adapter, fw);
4824 else
4825 status = be_fw_download(adapter, fw);
4826
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004827 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304828 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004829
Ajit Khaparde84517482009-09-04 03:12:16 +00004830fw_exit:
4831 release_firmware(fw);
4832 return status;
4833}
4834
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004835static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4836 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004837{
4838 struct be_adapter *adapter = netdev_priv(dev);
4839 struct nlattr *attr, *br_spec;
4840 int rem;
4841 int status = 0;
4842 u16 mode = 0;
4843
4844 if (!sriov_enabled(adapter))
4845 return -EOPNOTSUPP;
4846
4847 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004848 if (!br_spec)
4849 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004850
4851 nla_for_each_nested(attr, br_spec, rem) {
4852 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4853 continue;
4854
Thomas Grafb7c1a312014-11-26 13:42:17 +01004855 if (nla_len(attr) < sizeof(mode))
4856 return -EINVAL;
4857
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004858 mode = nla_get_u16(attr);
Suresh Reddyac0f5fb2015-12-30 01:28:57 -05004859 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4860 return -EOPNOTSUPP;
4861
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004862 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4863 return -EINVAL;
4864
4865 status = be_cmd_set_hsw_config(adapter, 0, 0,
4866 adapter->if_handle,
4867 mode == BRIDGE_MODE_VEPA ?
4868 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04004869 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004870 if (status)
4871 goto err;
4872
4873 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4874 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4875
4876 return status;
4877 }
4878err:
4879 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4880 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4881
4882 return status;
4883}
4884
4885static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004886 struct net_device *dev, u32 filter_mask,
4887 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004888{
4889 struct be_adapter *adapter = netdev_priv(dev);
4890 int status = 0;
4891 u8 hsw_mode;
4892
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004893 /* BE and Lancer chips support VEB mode only */
4894 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Ivan Vecera84317062016-02-11 12:42:26 +01004895 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4896 if (!pci_sriov_get_totalvfs(adapter->pdev))
4897 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004898 hsw_mode = PORT_FWD_TYPE_VEB;
4899 } else {
4900 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004901 adapter->if_handle, &hsw_mode,
4902 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004903 if (status)
4904 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04004905
4906 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4907 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004908 }
4909
4910 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4911 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004912 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004913 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004914}
4915
Sathya Perlab7172412016-07-27 05:26:18 -04004916static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
4917 void (*func)(struct work_struct *))
4918{
4919 struct be_cmd_work *work;
4920
4921 work = kzalloc(sizeof(*work), GFP_ATOMIC);
4922 if (!work) {
4923 dev_err(&adapter->pdev->dev,
4924 "be_work memory allocation failed\n");
4925 return NULL;
4926 }
4927
4928 INIT_WORK(&work->work, func);
4929 work->adapter = adapter;
4930 return work;
4931}
4932
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004933/* VxLAN offload Notes:
4934 *
4935 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 4936 * distinguish various types of transports (VxLAN, GRE, NVGRE, ...). So, offload
 4937 * is expected to work across all types of IP tunnels once exported. Skyhawk
 4938 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304939 * offloads in hw_enc_features only when a VxLAN port is added. If other
 4940 * (non-VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads
 4941 * for those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004942 *
 4943 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05304944 * adds more than one port, we disable offloads and re-enable them when
4945 * there's only one port left. We maintain a list of ports for this purpose.
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004946 */
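/* Worked example of the bookkeeping below: adding port 4789 enables offloads
 * for 4789; adding 4789 again only bumps its alias count; adding a second
 * distinct port (say 8472) disables offloads entirely; deleting 8472 leaves
 * a single port on the list, so offloads are re-enabled for 4789.
 */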
Sathya Perlab7172412016-07-27 05:26:18 -04004947static void be_work_add_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05304948{
Sathya Perlab7172412016-07-27 05:26:18 -04004949 struct be_cmd_work *cmd_work =
4950 container_of(work, struct be_cmd_work, work);
4951 struct be_adapter *adapter = cmd_work->adapter;
Sathya Perlac9c47142014-03-27 10:46:19 +05304952 struct device *dev = &adapter->pdev->dev;
Sathya Perlab7172412016-07-27 05:26:18 -04004953 __be16 port = cmd_work->info.vxlan_port;
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05304954 struct be_vxlan_port *vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05304955 int status;
4956
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05304957 /* Bump up the alias count if it is an existing port */
4958 list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
4959 if (vxlan_port->port == port) {
4960 vxlan_port->port_aliases++;
4961 goto done;
4962 }
Jiri Benc1e5b3112015-09-17 16:11:13 +02004963 }
4964
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05304965 /* Add a new port to our list. We don't need a lock here since port
4966 * add/delete are done only in the context of a single-threaded work
4967 * queue (be_wq).
4968 */
4969 vxlan_port = kzalloc(sizeof(*vxlan_port), GFP_KERNEL);
4970 if (!vxlan_port)
4971 goto done;
4972
4973 vxlan_port->port = port;
4974 INIT_LIST_HEAD(&vxlan_port->list);
4975 list_add_tail(&vxlan_port->list, &adapter->vxlan_port_list);
4976 adapter->vxlan_port_count++;
4977
Sathya Perlac9c47142014-03-27 10:46:19 +05304978 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304979 dev_info(dev,
4980 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004981 dev_info(dev, "Disabling VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004982 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304983 }
4984
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05304985 if (adapter->vxlan_port_count > 1)
Sathya Perlab7172412016-07-27 05:26:18 -04004986 goto done;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004987
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05304988 status = be_enable_vxlan_offloads(adapter);
4989 if (!status)
4990 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304991
Sathya Perlac9c47142014-03-27 10:46:19 +05304992err:
4993 be_disable_vxlan_offloads(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04004994done:
4995 kfree(cmd_work);
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05304996 return;
Sathya Perlac9c47142014-03-27 10:46:19 +05304997}
4998
Sathya Perlab7172412016-07-27 05:26:18 -04004999static void be_work_del_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05305000{
Sathya Perlab7172412016-07-27 05:26:18 -04005001 struct be_cmd_work *cmd_work =
5002 container_of(work, struct be_cmd_work, work);
5003 struct be_adapter *adapter = cmd_work->adapter;
5004 __be16 port = cmd_work->info.vxlan_port;
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05305005 struct be_vxlan_port *vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05305006
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05305007 /* Nothing to be done if a port alias is being deleted */
5008 list_for_each_entry(vxlan_port, &adapter->vxlan_port_list, list) {
5009 if (vxlan_port->port == port) {
5010 if (vxlan_port->port_aliases) {
5011 vxlan_port->port_aliases--;
5012 goto done;
5013 }
5014 break;
5015 }
5016 }
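	/* If we get here, the port is expected to be on the list, since the
	 * stack only deletes ports it previously added.
	 */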
Sathya Perlac9c47142014-03-27 10:46:19 +05305017
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05305018 /* No port aliases left; delete the port from the list */
5019 list_del(&vxlan_port->list);
5020 adapter->vxlan_port_count--;
5021
5022 /* Disable VxLAN offload if this is the offloaded port */
5023 if (adapter->vxlan_port == vxlan_port->port) {
5024 WARN_ON(adapter->vxlan_port_count);
5025 be_disable_vxlan_offloads(adapter);
5026 dev_info(&adapter->pdev->dev,
5027 "Disabled VxLAN offloads for UDP port %d\n",
5028 be16_to_cpu(port));
Sathya Perlab7172412016-07-27 05:26:18 -04005029 goto out;
Jiri Benc1e5b3112015-09-17 16:11:13 +02005030 }
5031
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05305032 /* If only 1 port is left, re-enable VxLAN offload */
5033 if (adapter->vxlan_port_count == 1)
5034 be_enable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05305035
Sathya Perlab7172412016-07-27 05:26:18 -04005036out:
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05305037 kfree(vxlan_port);
5038done:
Sathya Perlab7172412016-07-27 05:26:18 -04005039 kfree(cmd_work);
5040}
5041
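/* Common dispatch for VxLAN port add/delete: ignore non-VxLAN tunnels and
 * chips without the offload (Lancer, BEx, multi-channel), then defer the
 * port bookkeeping to be_wq so that it runs in single-threaded context.
 */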
5042static void be_cfg_vxlan_port(struct net_device *netdev,
5043 struct udp_tunnel_info *ti,
5044 void (*func)(struct work_struct *))
5045{
5046 struct be_adapter *adapter = netdev_priv(netdev);
5047 struct be_cmd_work *cmd_work;
5048
5049 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5050 return;
5051
5052 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5053 return;
5054
5055 cmd_work = be_alloc_work(adapter, func);
5056 if (cmd_work) {
5057 cmd_work->info.vxlan_port = ti->port;
5058 queue_work(be_wq, &cmd_work->work);
5059 }
5060}
5061
5062static void be_del_vxlan_port(struct net_device *netdev,
5063 struct udp_tunnel_info *ti)
5064{
5065 be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
5066}
5067
5068static void be_add_vxlan_port(struct net_device *netdev,
5069 struct udp_tunnel_info *ti)
5070{
5071 be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
Sathya Perlac9c47142014-03-27 10:46:19 +05305072}
Joe Stringer725d5482014-11-13 16:38:13 -08005073
Jesse Gross5f352272014-12-23 22:37:26 -08005074static netdev_features_t be_features_check(struct sk_buff *skb,
5075 struct net_device *dev,
5076 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005077{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305078 struct be_adapter *adapter = netdev_priv(dev);
5079 u8 l4_hdr = 0;
5080
5081 /* The code below restricts offload features for some tunneled packets.
 5082	 * Offload features for normal (non-tunnel) packets are unchanged.
5083 */
5084 if (!skb->encapsulation ||
5085 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5086 return features;
5087
5088 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5089 * should disable tunnel offload features if it's not a VxLAN packet,
5090 * as tunnel offloads have been enabled only for VxLAN. This is done to
 5091	 * allow other tunneled traffic like GRE to work fine while VxLAN
5092 * offloads are configured in Skyhawk-R.
5093 */
5094 switch (vlan_get_protocol(skb)) {
5095 case htons(ETH_P_IP):
5096 l4_hdr = ip_hdr(skb)->protocol;
5097 break;
5098 case htons(ETH_P_IPV6):
5099 l4_hdr = ipv6_hdr(skb)->nexthdr;
5100 break;
5101 default:
5102 return features;
5103 }
5104
5105 if (l4_hdr != IPPROTO_UDP ||
5106 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5107 skb->inner_protocol != htons(ETH_P_TEB) ||
5108 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
Sabrina Dubroca096de2f2017-01-03 16:26:04 +01005109 sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
5110 !adapter->vxlan_port ||
5111 udp_hdr(skb)->dest != adapter->vxlan_port)
Tom Herberta1882222015-12-14 11:19:43 -08005112 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305113
5114 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005115}
Sathya Perlac9c47142014-03-27 10:46:19 +05305116
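/* Build a physical port id from the HBA port number followed by the
 * controller serial-number words, copied in reverse word order.
 */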
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305117static int be_get_phys_port_id(struct net_device *dev,
5118 struct netdev_phys_item_id *ppid)
5119{
5120 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5121 struct be_adapter *adapter = netdev_priv(dev);
5122 u8 *id;
5123
5124 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5125 return -ENOSPC;
5126
5127 ppid->id[0] = adapter->hba_port_num + 1;
5128 id = &ppid->id[1];
5129 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5130 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5131 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5132
5133 ppid->id_len = id_len;
5134
5135 return 0;
5136}
5137
Sathya Perlab7172412016-07-27 05:26:18 -04005138static void be_set_rx_mode(struct net_device *dev)
5139{
5140 struct be_adapter *adapter = netdev_priv(dev);
5141 struct be_cmd_work *work;
5142
5143 work = be_alloc_work(adapter, be_work_set_rx_mode);
5144 if (work)
5145 queue_work(be_wq, &work->work);
5146}
5147
stephen hemmingere5686ad2012-01-05 19:10:25 +00005148static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005149 .ndo_open = be_open,
5150 .ndo_stop = be_close,
5151 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00005152 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005153 .ndo_set_mac_address = be_mac_addr_set,
Sathya Perlaab1594e2011-07-25 19:10:15 +00005154 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005155 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005156 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5157 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00005158 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00005159 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04005160 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00005161 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05305162 .ndo_set_vf_link_state = be_set_vf_link_state,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005163 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
Ivan Vecera66268732011-12-08 01:31:21 +00005164#ifdef CONFIG_NET_POLL_CONTROLLER
5165 .ndo_poll_controller = be_netpoll,
5166#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005167 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5168 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07005169 .ndo_udp_tunnel_add = be_add_vxlan_port,
5170 .ndo_udp_tunnel_del = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005171 .ndo_features_check = be_features_check,
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305172 .ndo_get_phys_port_id = be_get_phys_port_id,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005173};
5174
5175static void be_netdev_init(struct net_device *netdev)
5176{
5177 struct be_adapter *adapter = netdev_priv(netdev);
5178
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005179 netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005180 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
Patrick McHardyf6469682013-04-19 02:04:27 +00005181 NETIF_F_HW_VLAN_CTAG_TX;
Ajit Khaparde62219062016-02-10 22:45:53 +05305182	if (be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)
Michał Mirosław8b8ddc62011-04-08 02:38:47 +00005183 netdev->hw_features |= NETIF_F_RXHASH;
Michał Mirosław6332c8d2011-04-07 02:43:48 +00005184
5185 netdev->features |= netdev->hw_features |
Patrick McHardyf6469682013-04-19 02:04:27 +00005186 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
Ajit Khaparde4b972912011-04-06 18:07:43 +00005187
Padmanabh Ratnakareb8a50d2011-06-11 15:58:46 -07005188 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
Michał Mirosław79032642010-11-30 06:38:00 +00005189 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
Ajit Khaparde51c59872009-11-29 17:54:54 +00005190
Ajit Khapardefbc13f02012-03-18 06:23:21 +00005191 netdev->priv_flags |= IFF_UNICAST_FLT;
5192
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005193 netdev->flags |= IFF_MULTICAST;
5194
ajit.khaparde@broadcom.com127bfce2016-02-23 00:35:01 +05305195 netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00005196
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005197 netdev->netdev_ops = &be_netdev_ops;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005198
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00005199 netdev->ethtool_ops = &be_ethtool_ops;
Jarod Wilsond894be52016-10-20 13:55:16 -04005200
5201 /* MTU range: 256 - 9000 */
5202 netdev->min_mtu = BE_MIN_MTU;
5203 netdev->max_mtu = BE_MAX_MTU;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005204}
5205
Kalesh AP87ac1a52015-02-23 04:20:15 -05005206static void be_cleanup(struct be_adapter *adapter)
5207{
5208 struct net_device *netdev = adapter->netdev;
5209
5210 rtnl_lock();
5211 netif_device_detach(netdev);
5212 if (netif_running(netdev))
5213 be_close(netdev);
5214 rtnl_unlock();
5215
5216 be_clear(adapter);
5217}
5218
Kalesh AP484d76f2015-02-23 04:20:14 -05005219static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005220{
Kalesh APd0e1b312015-02-23 04:20:12 -05005221 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005222 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005223
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005224 status = be_setup(adapter);
5225 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005226 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005227
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005228 rtnl_lock();
5229 if (netif_running(netdev))
Kalesh APd0e1b312015-02-23 04:20:12 -05005230 status = be_open(netdev);
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005231 rtnl_unlock();
5232
5233 if (status)
5234 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005235
Kalesh APd0e1b312015-02-23 04:20:12 -05005236 netif_device_attach(netdev);
5237
Kalesh AP484d76f2015-02-23 04:20:14 -05005238 return 0;
5239}
5240
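/* Initiate a chip soft reset by setting the SR bit in the SLIPORT
 * soft-reset register.
 */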
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305241static void be_soft_reset(struct be_adapter *adapter)
5242{
5243 u32 val;
5244
5245 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5246 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5247 val |= SLIPORT_SOFTRESET_SR_MASK;
5248 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5249}
5250
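/* A UE is treated as recoverable only if the POST stage reports a
 * recoverable error code, enough time has elapsed since both driver load
 * and the last recovery, and the error code differs from the previous one
 * (two consecutive identical TPE codes are considered fatal).
 */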
5251static bool be_err_is_recoverable(struct be_adapter *adapter)
5252{
5253 struct be_error_recovery *err_rec = &adapter->error_recovery;
5254 unsigned long initial_idle_time =
5255 msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
5256 unsigned long recovery_interval =
5257 msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
5258 u16 ue_err_code;
5259 u32 val;
5260
5261 val = be_POST_stage_get(adapter);
5262 if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
5263 return false;
5264 ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
5265 if (ue_err_code == 0)
5266 return false;
5267
5268 dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
5269 ue_err_code);
5270
Karim Eshapa2faf2652017-05-01 15:58:08 +02005271 if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305272 dev_err(&adapter->pdev->dev,
5273 "Cannot recover within %lu sec from driver load\n",
5274 jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
5275 return false;
5276 }
5277
Karim Eshapa2faf2652017-05-01 15:58:08 +02005278 if (err_rec->last_recovery_time && time_before_eq(
5279 jiffies - err_rec->last_recovery_time, recovery_interval)) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305280 dev_err(&adapter->pdev->dev,
5281 "Cannot recover within %lu sec from last recovery\n",
5282 jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
5283 return false;
5284 }
5285
5286 if (ue_err_code == err_rec->last_err_code) {
5287 dev_err(&adapter->pdev->dev,
5288 "Cannot recover from a consecutive TPE error\n");
5289 return false;
5290 }
5291
5292 err_rec->last_recovery_time = jiffies;
5293 err_rec->last_err_code = ue_err_code;
5294 return true;
5295}
5296
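/* TPE recovery state machine; rough flow as implemented below:
 *   NONE -> DETECT: wait UE_DETECT_DURATION, then re-read the POST stage;
 *   DETECT -> RESET (PF0 only): wait until ue_to_reset_time, re-check the
 *             recovery criteria and issue the chip soft reset;
 *   DETECT/RESET -> PRE_POLL: wait until ue_to_poll_time;
 *   PRE_POLL -> REINIT: recovery proceeds in the caller.
 * -EAGAIN makes the caller reschedule after err_rec->resched_delay msecs.
 */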
5297static int be_tpe_recover(struct be_adapter *adapter)
5298{
5299 struct be_error_recovery *err_rec = &adapter->error_recovery;
5300 int status = -EAGAIN;
5301 u32 val;
5302
5303 switch (err_rec->recovery_state) {
5304 case ERR_RECOVERY_ST_NONE:
5305 err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
5306 err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
5307 break;
5308
5309 case ERR_RECOVERY_ST_DETECT:
5310 val = be_POST_stage_get(adapter);
5311 if ((val & POST_STAGE_RECOVERABLE_ERR) !=
5312 POST_STAGE_RECOVERABLE_ERR) {
5313 dev_err(&adapter->pdev->dev,
5314 "Unrecoverable HW error detected: 0x%x\n", val);
5315 status = -EINVAL;
5316 err_rec->resched_delay = 0;
5317 break;
5318 }
5319
5320 dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
5321
5322 /* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
5323 * milliseconds before it checks for final error status in
 5324	 * SLIPORT_SEMAPHORE to determine if the recovery criteria are met.
 5325	 * If they are, then PF0 initiates a Soft Reset.
5326 */
5327 if (adapter->pf_num == 0) {
5328 err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
5329 err_rec->resched_delay = err_rec->ue_to_reset_time -
5330 ERR_RECOVERY_UE_DETECT_DURATION;
5331 break;
5332 }
5333
5334 err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5335 err_rec->resched_delay = err_rec->ue_to_poll_time -
5336 ERR_RECOVERY_UE_DETECT_DURATION;
5337 break;
5338
5339 case ERR_RECOVERY_ST_RESET:
5340 if (!be_err_is_recoverable(adapter)) {
5341 dev_err(&adapter->pdev->dev,
5342 "Failed to meet recovery criteria\n");
5343 status = -EIO;
5344 err_rec->resched_delay = 0;
5345 break;
5346 }
5347 be_soft_reset(adapter);
5348 err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5349 err_rec->resched_delay = err_rec->ue_to_poll_time -
5350 err_rec->ue_to_reset_time;
5351 break;
5352
5353 case ERR_RECOVERY_ST_PRE_POLL:
5354 err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
5355 err_rec->resched_delay = 0;
5356 status = 0; /* done */
5357 break;
5358
5359 default:
5360 status = -EINVAL;
5361 err_rec->resched_delay = 0;
5362 break;
5363 }
5364
5365 return status;
5366}
5367
Kalesh AP484d76f2015-02-23 04:20:14 -05005368static int be_err_recover(struct be_adapter *adapter)
5369{
Kalesh AP484d76f2015-02-23 04:20:14 -05005370 int status;
5371
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305372 if (!lancer_chip(adapter)) {
5373 if (!adapter->error_recovery.recovery_supported ||
5374 adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
5375 return -EIO;
5376 status = be_tpe_recover(adapter);
5377 if (status)
5378 goto err;
5379 }
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305380
5381 /* Wait for adapter to reach quiescent state before
5382 * destroying queues
5383 */
5384 status = be_fw_wait_ready(adapter);
5385 if (status)
5386 goto err;
5387
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305388 adapter->flags |= BE_FLAGS_TRY_RECOVERY;
5389
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305390 be_cleanup(adapter);
5391
Kalesh AP484d76f2015-02-23 04:20:14 -05005392 status = be_resume(adapter);
5393 if (status)
5394 goto err;
5395
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305396 adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
5397
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005398err:
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005399 return status;
5400}
5401
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005402static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005403{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305404 struct be_error_recovery *err_rec =
5405 container_of(work, struct be_error_recovery,
5406 err_detection_work.work);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005407 struct be_adapter *adapter =
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305408 container_of(err_rec, struct be_adapter,
5409 error_recovery);
5410 u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305411 struct device *dev = &adapter->pdev->dev;
5412 int recovery_status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005413
5414 be_detect_error(adapter);
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305415 if (!be_check_error(adapter, BE_ERROR_HW))
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305416 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05005417
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305418 recovery_status = be_err_recover(adapter);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305419 if (!recovery_status) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305420 err_rec->recovery_retries = 0;
5421 err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305422 dev_info(dev, "Adapter recovery successful\n");
5423 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305424 } else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5425 /* BEx/SH recovery state machine */
5426 if (adapter->pf_num == 0 &&
5427 err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5428 dev_err(&adapter->pdev->dev,
5429 "Adapter recovery in progress\n");
5430 resched_delay = err_rec->resched_delay;
5431 goto reschedule_task;
5432 } else if (lancer_chip(adapter) && be_virtfn(adapter)) {
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305433		/* For VFs, check every second whether the PF has
 5434		 * allocated resources.
5435 */
5436 dev_err(dev, "Re-trying adapter recovery\n");
5437 goto reschedule_task;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305438 } else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5439 ERR_RECOVERY_MAX_RETRY_COUNT) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305440 /* In case of another error during recovery, it takes 30 sec
 5441		 * for the adapter to come out of error. Retry error recovery after
5442 * this time interval.
5443 */
5444 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305445 resched_delay = ERR_RECOVERY_RETRY_DELAY;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305446 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305447 } else {
5448 dev_err(dev, "Adapter recovery failed\n");
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305449 dev_err(dev, "Please reboot server to recover\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005450 }
5451
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305452 return;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305453
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305454reschedule_task:
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305455 be_schedule_err_detection(adapter, resched_delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005456}
5457
Vasundhara Volam21252372015-02-06 08:18:42 -05005458static void be_log_sfp_info(struct be_adapter *adapter)
5459{
5460 int status;
5461
5462 status = be_cmd_query_sfp_info(adapter);
5463 if (!status) {
5464 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305465			"Port %c: %s Vendor: %s part no: %s\n",
5466 adapter->port_name,
5467 be_misconfig_evt_port_state[adapter->phy_state],
5468 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005469 adapter->phy.vendor_pn);
5470 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305471 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005472}
5473
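/* Periodic (1 second) housekeeping: die-temperature query, stats refresh,
 * reaping MCC completions while interrupts are not yet enabled, replenishing
 * starved RX queues, EQ-delay updates and SFP state logging.
 */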
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005474static void be_worker(struct work_struct *work)
5475{
5476 struct be_adapter *adapter =
5477 container_of(work, struct be_adapter, work.work);
5478 struct be_rx_obj *rxo;
5479 int i;
5480
Guilherme G. Piccolid3480612016-07-26 17:39:42 -03005481 if (be_physfn(adapter) &&
5482 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5483 be_cmd_get_die_temperature(adapter);
5484
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005485 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005486 * mcc completions
5487 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005488 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005489 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005490 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005491 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005492 goto reschedule;
5493 }
5494
5495 if (!adapter->stats_cmd_sent) {
5496 if (lancer_chip(adapter))
5497 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305498 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005499 else
5500 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5501 }
5502
5503 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305504 /* Replenish RX-queues starved due to memory
5505 * allocation failures.
5506 */
5507 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305508 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005509 }
5510
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04005511 /* EQ-delay update for Skyhawk is done while notifying EQ */
5512 if (!skyhawk_chip(adapter))
5513 be_eqd_update(adapter, false);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005514
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305515 if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
Vasundhara Volam21252372015-02-06 08:18:42 -05005516 be_log_sfp_info(adapter);
5517
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005518reschedule:
5519 adapter->work_counter++;
Sathya Perlab7172412016-07-27 05:26:18 -04005520 queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005521}
5522
Sathya Perla78fad34e2015-02-23 04:20:08 -05005523static void be_unmap_pci_bars(struct be_adapter *adapter)
5524{
5525 if (adapter->csr)
5526 pci_iounmap(adapter->pdev, adapter->csr);
5527 if (adapter->db)
5528 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005529 if (adapter->pcicfg && adapter->pcicfg_mapped)
5530 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005531}
5532
5533static int db_bar(struct be_adapter *adapter)
5534{
Kalesh AP18c57c72015-05-06 05:30:38 -04005535 if (lancer_chip(adapter) || be_virtfn(adapter))
Sathya Perla78fad34e2015-02-23 04:20:08 -05005536 return 0;
5537 else
5538 return 4;
5539}
5540
5541static int be_roce_map_pci_bars(struct be_adapter *adapter)
5542{
5543 if (skyhawk_chip(adapter)) {
5544 adapter->roce_db.size = 4096;
5545 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5546 db_bar(adapter));
5547 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5548 db_bar(adapter));
5549 }
5550 return 0;
5551}
5552
5553static int be_map_pci_bars(struct be_adapter *adapter)
5554{
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005555 struct pci_dev *pdev = adapter->pdev;
Sathya Perla78fad34e2015-02-23 04:20:08 -05005556 u8 __iomem *addr;
5557 u32 sli_intf;
5558
5559 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5560 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5561 SLI_INTF_FAMILY_SHIFT;
5562 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5563
5564 if (BEx_chip(adapter) && be_physfn(adapter)) {
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005565 adapter->csr = pci_iomap(pdev, 2, 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005566 if (!adapter->csr)
5567 return -ENOMEM;
5568 }
5569
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005570 addr = pci_iomap(pdev, db_bar(adapter), 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005571 if (!addr)
5572 goto pci_map_err;
5573 adapter->db = addr;
5574
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005575 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5576 if (be_physfn(adapter)) {
5577 /* PCICFG is the 2nd BAR in BE2 */
5578 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5579 if (!addr)
5580 goto pci_map_err;
5581 adapter->pcicfg = addr;
Douglas Millera69bf3c2016-03-04 15:36:56 -06005582 adapter->pcicfg_mapped = true;
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005583 } else {
5584 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Douglas Millera69bf3c2016-03-04 15:36:56 -06005585 adapter->pcicfg_mapped = false;
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005586 }
5587 }
5588
Sathya Perla78fad34e2015-02-23 04:20:08 -05005589 be_roce_map_pci_bars(adapter);
5590 return 0;
5591
5592pci_map_err:
David S. Miller0fa74a4b2015-03-20 18:51:09 -04005593 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla78fad34e2015-02-23 04:20:08 -05005594 be_unmap_pci_bars(adapter);
5595 return -ENOMEM;
5596}
5597
5598static void be_drv_cleanup(struct be_adapter *adapter)
5599{
5600 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5601 struct device *dev = &adapter->pdev->dev;
5602
5603 if (mem->va)
5604 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5605
5606 mem = &adapter->rx_filter;
5607 if (mem->va)
5608 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5609
5610 mem = &adapter->stats_cmd;
5611 if (mem->va)
5612 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5613}
5614
5615/* Allocate and initialize various fields in be_adapter struct */
5616static int be_drv_init(struct be_adapter *adapter)
5617{
5618 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5619 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5620 struct be_dma_mem *rx_filter = &adapter->rx_filter;
5621 struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5622 struct device *dev = &adapter->pdev->dev;
5623 int status = 0;
5624
5625 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Sriharsha Basavapatnae51000db2015-06-05 15:33:59 +05305626 mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
5627 &mbox_mem_alloc->dma,
5628 GFP_KERNEL);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005629 if (!mbox_mem_alloc->va)
5630 return -ENOMEM;
5631
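	/* Keep the mailbox 16-byte aligned; the extra 16 bytes allocated
	 * above leave room for PTR_ALIGN to round both addresses up.
	 */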
5632 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5633 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5634 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005635
5636 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5637 rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
5638 &rx_filter->dma, GFP_KERNEL);
5639 if (!rx_filter->va) {
5640 status = -ENOMEM;
5641 goto free_mbox;
5642 }
5643
5644 if (lancer_chip(adapter))
5645 stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5646 else if (BE2_chip(adapter))
5647 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5648 else if (BE3_chip(adapter))
5649 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5650 else
5651 stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5652 stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
5653 &stats_cmd->dma, GFP_KERNEL);
5654 if (!stats_cmd->va) {
5655 status = -ENOMEM;
5656 goto free_rx_filter;
5657 }
5658
5659 mutex_init(&adapter->mbox_lock);
Sathya Perlab7172412016-07-27 05:26:18 -04005660 mutex_init(&adapter->mcc_lock);
5661 mutex_init(&adapter->rx_filter_lock);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005662 spin_lock_init(&adapter->mcc_cq_lock);
5663 init_completion(&adapter->et_cmd_compl);
5664
5665 pci_save_state(adapter->pdev);
5666
5667 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305668
5669 adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
5670 adapter->error_recovery.resched_delay = 0;
5671 INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005672 be_err_detection_task);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005673
5674 adapter->rx_fc = true;
5675 adapter->tx_fc = true;
5676
5677 /* Must be a power of 2 or else MODULO will BUG_ON */
5678 adapter->be_get_temp_freq = 64;
Sathya Perla78fad34e2015-02-23 04:20:08 -05005679
Sriharsha Basavapatnabf8d9df2017-04-17 21:33:13 +05305680 INIT_LIST_HEAD(&adapter->vxlan_port_list);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005681 return 0;
5682
5683free_rx_filter:
5684 dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5685free_mbox:
5686 dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5687 mbox_mem_alloc->dma);
5688 return status;
5689}
5690
5691static void be_remove(struct pci_dev *pdev)
5692{
5693 struct be_adapter *adapter = pci_get_drvdata(pdev);
5694
5695 if (!adapter)
5696 return;
5697
5698 be_roce_dev_remove(adapter);
5699 be_intr_set(adapter, false);
5700
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005701 be_cancel_err_detection(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005702
5703 unregister_netdev(adapter->netdev);
5704
5705 be_clear(adapter);
5706
Somnath Koturf72099e2016-09-07 19:57:50 +05305707 if (!pci_vfs_assigned(adapter->pdev))
5708 be_cmd_reset_function(adapter);
5709
Sathya Perla78fad34e2015-02-23 04:20:08 -05005710 /* tell fw we're done with firing cmds */
5711 be_cmd_fw_clean(adapter);
5712
5713 be_unmap_pci_bars(adapter);
5714 be_drv_cleanup(adapter);
5715
5716 pci_disable_pcie_error_reporting(pdev);
5717
5718 pci_release_regions(pdev);
5719 pci_disable_device(pdev);
5720
5721 free_netdev(adapter->netdev);
5722}
5723
Arnd Bergmann9a032592015-05-18 23:06:45 +02005724static ssize_t be_hwmon_show_temp(struct device *dev,
5725 struct device_attribute *dev_attr,
5726 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305727{
5728 struct be_adapter *adapter = dev_get_drvdata(dev);
5729
5730 /* Unit: millidegree Celsius */
5731 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5732 return -EIO;
5733 else
5734 return sprintf(buf, "%u\n",
5735 adapter->hwmon_info.be_on_die_temp * 1000);
5736}
5737
5738static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
5739 be_hwmon_show_temp, NULL, 1);
5740
5741static struct attribute *be_hwmon_attrs[] = {
5742 &sensor_dev_attr_temp1_input.dev_attr.attr,
5743 NULL
5744};
5745
5746ATTRIBUTE_GROUPS(be_hwmon);
5747
Sathya Perlad3791422012-09-28 04:39:44 +00005748static char *mc_name(struct be_adapter *adapter)
5749{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305750 char *str = ""; /* default */
5751
5752 switch (adapter->mc_type) {
5753 case UMC:
5754 str = "UMC";
5755 break;
5756 case FLEX10:
5757 str = "FLEX10";
5758 break;
5759 case vNIC1:
5760 str = "vNIC-1";
5761 break;
5762 case nPAR:
5763 str = "nPAR";
5764 break;
5765 case UFP:
5766 str = "UFP";
5767 break;
5768 case vNIC2:
5769 str = "vNIC-2";
5770 break;
5771 default:
5772 str = "";
5773 }
5774
5775 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005776}
5777
5778static inline char *func_name(struct be_adapter *adapter)
5779{
5780 return be_physfn(adapter) ? "PF" : "VF";
5781}
5782
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005783static inline char *nic_name(struct pci_dev *pdev)
5784{
5785 switch (pdev->device) {
5786 case OC_DEVICE_ID1:
5787 return OC_NAME;
5788 case OC_DEVICE_ID2:
5789 return OC_NAME_BE;
5790 case OC_DEVICE_ID3:
5791 case OC_DEVICE_ID4:
5792 return OC_NAME_LANCER;
5793 case BE_DEVICE_ID2:
5794 return BE3_NAME;
5795 case OC_DEVICE_ID5:
5796 case OC_DEVICE_ID6:
5797 return OC_NAME_SH;
5798 default:
5799 return BE_NAME;
5800 }
5801}
5802
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005803static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005804{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005805 struct be_adapter *adapter;
5806 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005807 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005808
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305809 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5810
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005811 status = pci_enable_device(pdev);
5812 if (status)
5813 goto do_none;
5814
5815 status = pci_request_regions(pdev, DRV_NAME);
5816 if (status)
5817 goto disable_dev;
5818 pci_set_master(pdev);
5819
Sathya Perla7f640062012-06-05 19:37:20 +00005820 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305821 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005822 status = -ENOMEM;
5823 goto rel_reg;
5824 }
5825 adapter = netdev_priv(netdev);
5826 adapter->pdev = pdev;
5827 pci_set_drvdata(pdev, adapter);
5828 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005829 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005830
Russell King4c15c242013-06-26 23:49:11 +01005831 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005832 if (!status) {
5833 netdev->features |= NETIF_F_HIGHDMA;
5834 } else {
Russell King4c15c242013-06-26 23:49:11 +01005835 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005836 if (status) {
5837 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5838 goto free_netdev;
5839 }
5840 }
5841
Kalesh AP2f951a92014-09-12 17:39:21 +05305842 status = pci_enable_pcie_error_reporting(pdev);
5843 if (!status)
5844 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005845
Sathya Perla78fad34e2015-02-23 04:20:08 -05005846 status = be_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005847 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005848 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005849
Sathya Perla78fad34e2015-02-23 04:20:08 -05005850 status = be_drv_init(adapter);
5851 if (status)
5852 goto unmap_bars;
5853
Sathya Perla5fb379e2009-06-18 00:02:59 +00005854 status = be_setup(adapter);
5855 if (status)
Sathya Perla78fad34e2015-02-23 04:20:08 -05005856 goto drv_cleanup;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005857
Sathya Perla3abcded2010-10-03 22:12:27 -07005858 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005859 status = register_netdev(netdev);
5860 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005861 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005862
Parav Pandit045508a2012-03-26 14:27:13 +00005863 be_roce_dev_add(adapter);
5864
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305865 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305866 adapter->error_recovery.probe_time = jiffies;
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005867
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305868 /* On Die temperature not supported for VF. */
Arnd Bergmann9a032592015-05-18 23:06:45 +02005869 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305870 adapter->hwmon_info.hwmon_dev =
5871 devm_hwmon_device_register_with_groups(&pdev->dev,
5872 DRV_NAME,
5873 adapter,
5874 be_hwmon_groups);
5875 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5876 }
5877
Sathya Perlad3791422012-09-28 04:39:44 +00005878 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005879 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005880
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005881 return 0;
5882
Sathya Perla5fb379e2009-06-18 00:02:59 +00005883unsetup:
5884 be_clear(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005885drv_cleanup:
5886 be_drv_cleanup(adapter);
5887unmap_bars:
5888 be_unmap_pci_bars(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005889free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005890 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005891rel_reg:
5892 pci_release_regions(pdev);
5893disable_dev:
5894 pci_disable_device(pdev);
5895do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005896 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005897 return status;
5898}
5899
5900static int be_suspend(struct pci_dev *pdev, pm_message_t state)
5901{
5902 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005903
Ajit Khaparded4360d62013-11-22 12:51:09 -06005904 be_intr_set(adapter, false);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005905 be_cancel_err_detection(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005906
Kalesh AP87ac1a52015-02-23 04:20:15 -05005907 be_cleanup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005908
5909 pci_save_state(pdev);
5910 pci_disable_device(pdev);
5911 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5912 return 0;
5913}
5914
Kalesh AP484d76f2015-02-23 04:20:14 -05005915static int be_pci_resume(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005916{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005917 struct be_adapter *adapter = pci_get_drvdata(pdev);
Kalesh AP484d76f2015-02-23 04:20:14 -05005918 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005919
5920 status = pci_enable_device(pdev);
5921 if (status)
5922 return status;
5923
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005924 pci_restore_state(pdev);
5925
Kalesh AP484d76f2015-02-23 04:20:14 -05005926 status = be_resume(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005927 if (status)
5928 return status;
5929
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305930 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005931
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005932 return 0;
5933}
5934
Sathya Perla82456b02010-02-17 01:35:37 +00005935/*
5936 * An FLR will stop BE from DMAing any data.
5937 */
5938static void be_shutdown(struct pci_dev *pdev)
5939{
5940 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005941
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005942 if (!adapter)
5943 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005944
Devesh Sharmad114f992014-06-10 19:32:15 +05305945 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005946 cancel_delayed_work_sync(&adapter->work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005947 be_cancel_err_detection(adapter);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005948
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005949 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005950
Ajit Khaparde57841862011-04-06 18:08:43 +00005951 be_cmd_reset_function(adapter);
5952
Sathya Perla82456b02010-02-17 01:35:37 +00005953 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005954}
5955
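/* PCI EEH (AER) error flow: error_detected() quiesces the device and asks
 * for a slot reset; slot_reset() re-enables the device and waits for the FW
 * to become ready; resume() then restores the driver via be_resume().
 */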
Sathya Perlacf588472010-02-14 21:22:01 +00005956static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305957 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005958{
5959 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005960
5961 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5962
Padmanabh Ratnakar68f22792016-02-18 03:09:34 +05305963 be_roce_dev_remove(adapter);
5964
Venkata Duvvuru954f6822015-05-13 13:00:13 +05305965 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5966 be_set_error(adapter, BE_ERROR_EEH);
Sathya Perlacf588472010-02-14 21:22:01 +00005967
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005968 be_cancel_err_detection(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005969
Kalesh AP87ac1a52015-02-23 04:20:15 -05005970 be_cleanup(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005971 }
Sathya Perlacf588472010-02-14 21:22:01 +00005972
5973 if (state == pci_channel_io_perm_failure)
5974 return PCI_ERS_RESULT_DISCONNECT;
5975
5976 pci_disable_device(pdev);
5977
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005978 /* The error could cause the FW to trigger a flash debug dump.
5979 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005980 * can cause it not to recover; wait for it to finish.
5981 * Wait only for first function as it is needed only once per
5982 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005983 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005984 if (pdev->devfn == 0)
5985 ssleep(30);
5986
Sathya Perlacf588472010-02-14 21:22:01 +00005987 return PCI_ERS_RESULT_NEED_RESET;
5988}
5989
5990static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5991{
5992 struct be_adapter *adapter = pci_get_drvdata(pdev);
5993 int status;
5994
5995 dev_info(&adapter->pdev->dev, "EEH reset\n");
Sathya Perlacf588472010-02-14 21:22:01 +00005996
5997 status = pci_enable_device(pdev);
5998 if (status)
5999 return PCI_ERS_RESULT_DISCONNECT;
6000
6001 pci_set_master(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00006002 pci_restore_state(pdev);
6003
6004 /* Check if card is ok and fw is ready */
Sathya Perlac5b3ad42013-03-05 22:23:20 +00006005 dev_info(&adapter->pdev->dev,
6006 "Waiting for FW to be ready after EEH reset\n");
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00006007 status = be_fw_wait_ready(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00006008 if (status)
6009 return PCI_ERS_RESULT_DISCONNECT;
6010
Sathya Perlad6b6d982012-09-05 01:56:48 +00006011 pci_cleanup_aer_uncorrect_error_status(pdev);
Venkata Duvvuru954f6822015-05-13 13:00:13 +05306012 be_clear_error(adapter, BE_CLEAR_ALL);
Sathya Perlacf588472010-02-14 21:22:01 +00006013 return PCI_ERS_RESULT_RECOVERED;
6014}
6015
6016static void be_eeh_resume(struct pci_dev *pdev)
6017{
6018 int status = 0;
6019 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00006020
6021 dev_info(&adapter->pdev->dev, "EEH resume\n");
6022
6023 pci_save_state(pdev);
6024
Kalesh AP484d76f2015-02-23 04:20:14 -05006025 status = be_resume(adapter);
Padmanabh Ratnakarbf99e502012-07-12 03:56:58 +00006026 if (status)
6027 goto err;
6028
Padmanabh Ratnakar68f22792016-02-18 03:09:34 +05306029 be_roce_dev_add(adapter);
6030
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05306031 be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
Sathya Perlacf588472010-02-14 21:22:01 +00006032 return;
6033err:
6034 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00006035}
6036
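/* sysfs sriov_numvfs handler: a zero count tears down the VFs; a non-zero
 * count redistributes the PF-pool resources across the requested number of
 * VFs (Skyhawk only) before updating the queues and setting up the VFs.
 */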
Vasundhara Volamace40af2015-03-04 00:44:34 -05006037static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
6038{
6039 struct be_adapter *adapter = pci_get_drvdata(pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04006040 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05006041 int status;
6042
6043 if (!num_vfs)
6044 be_vf_clear(adapter);
6045
6046 adapter->num_vfs = num_vfs;
6047
6048 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
6049 dev_warn(&pdev->dev,
6050 "Cannot disable VFs while they are assigned\n");
6051 return -EBUSY;
6052 }
6053
 6054	/* When the HW is in an SR-IOV capable configuration, the PF-pool
 6055	 * resources are equally distributed across the max number of VFs. The
 6056	 * user may request only a subset of the max VFs to be enabled.
 6057	 * Based on num_vfs, redistribute the resources across num_vfs so that
 6058	 * each VF will have access to a larger share of resources.
 6059	 * This facility is not available in BE3 FW.
 6060	 * Also, this is done by the FW in the Lancer chip.
6061 */
6062 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04006063 be_calculate_vf_res(adapter, adapter->num_vfs,
6064 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05006065 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04006066 adapter->num_vfs, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05006067 if (status)
6068 dev_err(&pdev->dev,
6069 "Failed to optimize SR-IOV resources\n");
6070 }
6071
6072 status = be_get_resources(adapter);
6073 if (status)
6074 return be_cmd_status(status);
6075
6076 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
6077 rtnl_lock();
6078 status = be_update_queues(adapter);
6079 rtnl_unlock();
6080 if (status)
6081 return be_cmd_status(status);
6082
6083 if (adapter->num_vfs)
6084 status = be_vf_setup(adapter);
6085
6086 if (!status)
6087 return adapter->num_vfs;
6088
6089 return 0;
6090}
6091
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07006092static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00006093 .error_detected = be_eeh_err_detected,
6094 .slot_reset = be_eeh_reset,
6095 .resume = be_eeh_resume,
6096};
6097
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006098static struct pci_driver be_driver = {
6099 .name = DRV_NAME,
6100 .id_table = be_dev_ids,
6101 .probe = be_probe,
6102 .remove = be_remove,
6103 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05006104 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00006105 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05006106 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00006107 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006108};
6109
6110static int __init be_init_module(void)
6111{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306112 int status;
6113
Joe Perches8e95a202009-12-03 07:58:21 +00006114 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6115 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006116 printk(KERN_WARNING DRV_NAME
6117 " : Module param rx_frag_size must be 2048/4096/8192."
6118 " Using 2048\n");
6119 rx_frag_size = 2048;
6120 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006121
Vasundhara Volamace40af2015-03-04 00:44:34 -05006122 if (num_vfs > 0) {
 6123		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
6124 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6125 }
6126
Sathya Perlab7172412016-07-27 05:26:18 -04006127 be_wq = create_singlethread_workqueue("be_wq");
6128 if (!be_wq) {
 6129		pr_warn(DRV_NAME ": workqueue creation failed\n");
 6130		return -ENOMEM;
6131 }
6132
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306133 be_err_recovery_workq =
6134 create_singlethread_workqueue("be_err_recover");
6135 if (!be_err_recovery_workq)
 6136		pr_warn(DRV_NAME ": Could not create error recovery workqueue\n");
6137
6138 status = pci_register_driver(&be_driver);
6139 if (status) {
6140 destroy_workqueue(be_wq);
6141 be_destroy_err_recovery_workq();
6142 }
6143 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006144}
6145module_init(be_init_module);
6146
6147static void __exit be_exit_module(void)
6148{
6149 pci_unregister_driver(&be_driver);
Sathya Perlab7172412016-07-27 05:26:18 -04006150
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306151 be_destroy_err_recovery_workq();
6152
Sathya Perlab7172412016-07-27 05:26:18 -04006153 if (be_wq)
6154 destroy_workqueue(be_wq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006155}
6156module_exit(be_exit_module);