blob: 52aed6d383c3282bdb5502d426ee5d71ddbc1e55 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053044/* Per-module error detection/recovery workq shared across all functions.
45 * Each function schedules its own work request on this shared workq.
46 */
Wei Yongjune6053dd2016-09-25 15:40:36 +000047static struct workqueue_struct *be_err_recovery_workq;
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +053048
Benoit Taine9baa3c32014-08-08 15:56:03 +020049static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070050 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070051 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070052 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
53 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000054 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000055 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000056 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000057 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070058 { 0 }
59};
60MODULE_DEVICE_TABLE(pci, be_dev_ids);
Sathya Perlab7172412016-07-27 05:26:18 -040061
62/* Workqueue used by all functions for defering cmd calls to the adapter */
Wei Yongjune6053dd2016-09-25 15:40:36 +000063static struct workqueue_struct *be_wq;
Sathya Perlab7172412016-07-27 05:26:18 -040064
Ajit Khaparde7c185272010-07-29 06:16:33 +000065/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070066static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000067 "CEV",
68 "CTX",
69 "DBUF",
70 "ERX",
71 "Host",
72 "MPU",
73 "NDMA",
74 "PTC ",
75 "RDMA ",
76 "RXF ",
77 "RXIPS ",
78 "RXULP0 ",
79 "RXULP1 ",
80 "RXULP2 ",
81 "TIM ",
82 "TPOST ",
83 "TPRE ",
84 "TXIPS ",
85 "TXULP0 ",
86 "TXULP1 ",
87 "UC ",
88 "WDMA ",
89 "TXULP2 ",
90 "HOST1 ",
91 "P0_OB_LINK ",
92 "P1_OB_LINK ",
93 "HOST_GPIO ",
94 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053095 "ERX2 ",
96 "SPARE ",
97 "JTAG ",
98 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000099};
Kalesh APe2fb1af2014-09-19 15:46:58 +0530100
Ajit Khaparde7c185272010-07-29 06:16:33 +0000101/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -0700102static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +0000103 "LPCMEMHOST",
104 "MGMT_MAC",
105 "PCS0ONLINE",
106 "MPU_IRAM",
107 "PCS1ONLINE",
108 "PCTL0",
109 "PCTL1",
110 "PMEM",
111 "RR",
112 "TXPB",
113 "RXPP",
114 "XAUI",
115 "TXP",
116 "ARM",
117 "IPC",
118 "HOST2",
119 "HOST3",
120 "HOST4",
121 "HOST5",
122 "HOST6",
123 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530124 "ECRC",
125 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700126 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530127 "PERIPH",
128 "LLTXULP",
129 "D2P",
130 "RCON",
131 "LDMA",
132 "LLTXP",
133 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000134 "Unknown"
135};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700136
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -0500137#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
138 BE_IF_FLAGS_BROADCAST | \
139 BE_IF_FLAGS_MULTICAST | \
140 BE_IF_FLAGS_PASS_L3L4_ERRORS)
141
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
143{
144 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530145
Sathya Perla1cfafab2012-02-23 18:50:15 +0000146 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000147 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
148 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000149 mem->va = NULL;
150 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700151}
152
153static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530154 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155{
156 struct be_dma_mem *mem = &q->dma_mem;
157
158 memset(q, 0, sizeof(*q));
159 q->len = len;
160 q->entry_size = entry_size;
161 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700162 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
163 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000165 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return 0;
167}
168
Somnath Kotur68c45a22013-03-14 02:42:07 +0000169static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170{
Sathya Perladb3ea782011-08-22 19:41:52 +0000171 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172
Sathya Perladb3ea782011-08-22 19:41:52 +0000173 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530174 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000175 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
176
Sathya Perla5f0b8492009-07-27 22:52:56 +0000177 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000179 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700180 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000181 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700182 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000183
Sathya Perladb3ea782011-08-22 19:41:52 +0000184 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530185 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186}
187
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188static void be_intr_set(struct be_adapter *adapter, bool enable)
189{
190 int status = 0;
191
192 /* On lancer interrupts can't be controlled via this register */
193 if (lancer_chip(adapter))
194 return;
195
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530196 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000197 return;
198
199 status = be_cmd_intr_set(adapter, enable);
200 if (status)
201 be_reg_intr_set(adapter, enable);
202}
203
Sathya Perla8788fdc2009-07-27 22:52:03 +0000204static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205{
206 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530207
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530208 if (be_check_error(adapter, BE_ERROR_HW))
209 return;
210
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211 val |= qid & DB_RQ_RING_ID_MASK;
212 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000213
214 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000215 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216}
217
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000218static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
219 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700220{
221 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530222
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530223 if (be_check_error(adapter, BE_ERROR_HW))
224 return;
225
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000226 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000228
229 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000230 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231}
232
/* Ring the EQ doorbell: ack @num_popped event entries on EQ @qid,
 * optionally re-arm the EQ and/or clear the interrupt, and program the
 * R2I delay multiplier encoding.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the doorbell write when the HW is in an error state */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
254
/* Ring the CQ doorbell: ack @num_popped processed completion entries on
 * CQ @qid and optionally re-arm the CQ to raise further events.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip the doorbell write when the HW is in an error state */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
271
/* Program @mac as the interface's active unicast MAC.
 * If @mac is already present in the uc-list, reuse that entry's pmac_id
 * instead of consuming another HW MAC slot.
 * Returns 0 on success or the be_cmd_pmac_add() error code.
 * NOTE(review): caller is expected to hold rx_filter_lock while uc_macs/
 * uc_list are read — confirm against callers.
 */
static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
{
	int i;

	/* Check if mac has already been added as part of uc-list */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
			/* mac already added, skip addition */
			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
			return 0;
		}
	}

	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
			       &adapter->pmac_id[0], 0);
}
288
/* Delete the MAC identified by @pmac_id from HW, unless that pmac_id is
 * still referenced by an entry in the uc-list (slots 1..uc_macs).
 */
static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
{
	int i;

	/* Skip deletion if the programmed mac is
	 * being used in uc-list
	 */
	for (i = 0; i < adapter->uc_macs; i++) {
		if (adapter->pmac_id[i + 1] == pmac_id)
			return;
	}
	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
}
302
/* ndo_set_mac_address handler.
 * Programs the user-supplied MAC into HW (when the device is running),
 * verifies with FW that the new MAC actually became active (a VF without
 * FILTMGMT privilege may silently fail to change it), and only then
 * updates netdev->dev_addr. Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
		return 0;

	/* BE3 VFs without FILTMGMT privilege are not allowed to set its MAC
	 * address
	 */
	if (BEx_chip(adapter) && be_virtfn(adapter) &&
	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
		return -EPERM;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	mutex_lock(&adapter->rx_filter_lock);
	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
	if (!status) {

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_dev_mac_del(adapter, old_pmac_id);
	}

	mutex_unlock(&adapter->rx_filter_lock);
	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	/* Remember currently programmed MAC */
	ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
376
Sathya Perlaca34fe32012-11-06 17:48:56 +0000377/* BE2 supports only v0 cmd */
378static void *hw_stats_from_cmd(struct be_adapter *adapter)
379{
380 if (BE2_chip(adapter)) {
381 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
382
383 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500384 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000385 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
386
387 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500388 } else {
389 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
390
391 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000392 }
393}
394
395/* BE2 supports only v0 cmd */
396static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
397{
398 if (BE2_chip(adapter)) {
399 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
400
401 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500402 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000403 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
404
405 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500406 } else {
407 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
408
409 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000410 }
411}
412
/* Copy the v0 (BE2) HW stats response into the driver's stats struct,
 * after byte-swapping the response from LE to CPU order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address- and vlan-filtered drops separately; the driver
	 * exposes them as a single counter
	 */
	drvs->rx_address_filtered =
					port_stats->rx_address_filtered +
					port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 jabber counters are per-port fields of the rxf stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
461
/* Copy the v1 (BE3) HW stats response into the driver's stats struct,
 * after byte-swapping the response from LE to CPU order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single pre-combined filtered-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
507
/* Copy the v2 (Skyhawk and newer) HW stats response into the driver's
 * stats struct, after byte-swapping from LE to CPU order. Also pulls in
 * the RoCE counters when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
					&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
561
/* Copy the Lancer per-physical-port stats response into the driver's
 * stats struct, after byte-swapping from LE to CPU order. Lancer splits
 * wide counters into _lo/_hi halves; only the low words are consumed here.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
				pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
				pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* address- and vlan-filtered drops are exposed as one counter */
	drvs->rx_address_filtered =
					pport_stats->rx_address_filtered +
					pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
				pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000598
Sathya Perla09c1c682011-08-22 19:41:53 +0000599static void accumulate_16bit_val(u32 *acc, u16 val)
600{
601#define lo(x) (x & 0xFFFF)
602#define hi(x) (x & 0xFFFF0000)
603 bool wrapped = val < lo(*acc);
604 u32 newacc = hi(*acc) + val;
605
606 if (wrapped)
607 newacc += 65536;
608 ACCESS_ONCE(*acc) = newacc;
609}
610
Jingoo Han4188e7d2013-08-05 18:02:02 +0900611static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530612 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000613{
614 if (!BEx_chip(adapter))
615 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
616 else
617 /* below erx HW counter can actually wrap around after
618 * 65535. Driver accumulates a 32-bit value
619 */
620 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
621 (u16)erx_stat);
622}
623
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000624void be_parse_stats(struct be_adapter *adapter)
625{
Ajit Khaparde61000862013-10-03 16:16:33 -0500626 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000627 struct be_rx_obj *rxo;
628 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000629 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000630
Sathya Perlaca34fe32012-11-06 17:48:56 +0000631 if (lancer_chip(adapter)) {
632 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000633 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000634 if (BE2_chip(adapter))
635 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500636 else if (BE3_chip(adapter))
637 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000638 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500639 else
640 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000641
Ajit Khaparde61000862013-10-03 16:16:33 -0500642 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000643 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000644 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
645 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000646 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000647 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000648}
649
/* ndo_get_stats64 handler: aggregates the per-queue SW rx/tx counters and
 * the driver/HW error counters (adapter->drv_stats) into @stats.
 */
static void be_get_stats64(struct net_device *netdev,
			   struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* seqcount retry loop: re-read pkts/bytes if a writer
		 * updated them while we were reading
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
}
716
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000717void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700718{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719 struct net_device *netdev = adapter->netdev;
720
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000721 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000722 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000723 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700724 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000725
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530726 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000727 netif_carrier_on(netdev);
728 else
729 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200730
731 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700732}
733
Sriharsha Basavapatnaf3d6ad82016-10-09 09:58:52 +0530734static int be_gso_hdr_len(struct sk_buff *skb)
735{
736 if (skb->encapsulation)
737 return skb_inner_transport_offset(skb) +
738 inner_tcp_hdrlen(skb);
739 return skb_transport_offset(skb) + tcp_hdrlen(skb);
740}
741
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500742static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700743{
Sathya Perla3c8def92011-06-12 20:01:58 +0000744 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatnaf3d6ad82016-10-09 09:58:52 +0530745 u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
746 /* Account for headers which get duplicated in TSO pkt */
747 u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
Sathya Perla3c8def92011-06-12 20:01:58 +0000748
Sathya Perlaab1594e2011-07-25 19:10:15 +0000749 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000750 stats->tx_reqs++;
Sriharsha Basavapatnaf3d6ad82016-10-09 09:58:52 +0530751 stats->tx_bytes += skb->len + dup_hdr_len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530752 stats->tx_pkts += tx_pkts;
753 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
754 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000755 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700756}
757
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500758/* Returns number of WRBs needed for the skb */
759static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700760{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500761 /* +1 for the header wrb */
762 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700763}
764
/* Program one fragment WRB with the buffer's DMA address and length.
 * The 64-bit bus address is split into hi/lo little-endian words as
 * required by the HW descriptor format.
 */
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
	wrb->rsvd0 = 0;
}
772
773/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
774 * to avoid the swap and shift/mask operations in wrb_fill().
775 */
776static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
777{
778 wrb->frag_pa_hi = 0;
779 wrb->frag_pa_lo = 0;
780 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000781 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700782}
783
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000784static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530785 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000786{
787 u8 vlan_prio;
788 u16 vlan_tag;
789
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100790 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000791 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
792 /* If vlan priority provided by OS is NOT in available bmap */
793 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
794 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500795 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000796
797 return vlan_tag;
798}
799
Sathya Perlac9c47142014-03-27 10:46:19 +0530800/* Used only for IP tunnel packets */
801static u16 skb_inner_ip_proto(struct sk_buff *skb)
802{
803 return (inner_ip_hdr(skb)->version == 4) ?
804 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
805}
806
807static u16 skb_ip_proto(struct sk_buff *skb)
808{
809 return (ip_hdr(skb)->version == 4) ?
810 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
811}
812
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530813static inline bool be_is_txq_full(struct be_tx_obj *txo)
814{
815 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
816}
817
/* TXQ may be re-woken once it has drained below half of its depth */
static inline bool be_can_txq_wake(struct be_tx_obj *txo)
{
	return atomic_read(&txo->q.used) < txo->q.len / 2;
}
822
823static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
824{
825 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
826}
827
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530828static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
829 struct sk_buff *skb,
830 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700831{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530832 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000834 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530835 BE_WRB_F_SET(wrb_params->features, LSO, 1);
836 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000837 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530838 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700839 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530840 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530841 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530842 proto = skb_inner_ip_proto(skb);
843 } else {
844 proto = skb_ip_proto(skb);
845 }
846 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530847 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530848 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530849 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850 }
851
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100852 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530853 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
854 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855 }
856
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530857 BE_WRB_F_SET(wrb_params->features, CRC, 1);
858}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500859
/* Program the TX header WRB from the previously collected wrb_params:
 * offload flags, VLAN tag, WRB count and total frame length.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* OS2BMC: mark pkts that must also be forwarded to the BMC */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
896
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000897static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530898 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000899{
900 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500901 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000902
Sathya Perla7101e112010-03-22 20:41:12 +0000903
Sathya Perlaf986afc2015-02-06 08:18:43 -0500904 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
905 (u64)le32_to_cpu(wrb->frag_pa_lo);
906 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000907 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500908 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000909 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500910 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000911 }
912}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700913
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530914/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530915static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700916{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530917 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700918
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530919 queue_head_inc(&txo->q);
920 return head;
921}
922
923/* Set up the WRB header for xmit */
924static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
925 struct be_tx_obj *txo,
926 struct be_wrb_params *wrb_params,
927 struct sk_buff *skb, u16 head)
928{
929 u32 num_frags = skb_wrb_cnt(skb);
930 struct be_queue_info *txq = &txo->q;
931 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
932
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530933 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500934 be_dws_cpu_to_le(hdr, sizeof(*hdr));
935
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500936 BUG_ON(txo->sent_skb_list[head]);
937 txo->sent_skb_list[head] = skb;
938 txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530939 atomic_add(num_frags, &txq->used);
940 txo->last_req_wrb_cnt = num_frags;
941 txo->pend_wrb_cnt += num_frags;
942}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700943
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530944/* Setup a WRB fragment (buffer descriptor) for xmit */
945static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
946 int len)
947{
948 struct be_eth_wrb *wrb;
949 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700950
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530951 wrb = queue_head_node(txq);
952 wrb_fill(wrb, busaddr, len);
953 queue_head_inc(txq);
954}
955
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the hdr wrb so the walk below starts from this pkt */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first (linear) buffer was single-mapped */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index to before this packet */
	txq->head = head;
}
983
984/* Enqueue the given packet for transmit. This routine allocates WRBs for the
985 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
986 * of WRBs used up by the packet.
987 */
988static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
989 struct sk_buff *skb,
990 struct be_wrb_params *wrb_params)
991{
992 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
993 struct device *dev = &adapter->pdev->dev;
994 struct be_queue_info *txq = &txo->q;
995 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530996 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530997 dma_addr_t busaddr;
998 int len;
999
1000 head = be_tx_get_wrb_hdr(txo);
1001
1002 if (skb->len > skb->data_len) {
1003 len = skb_headlen(skb);
1004
1005 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
1006 if (dma_mapping_error(dev, busaddr))
1007 goto dma_err;
1008 map_single = true;
1009 be_tx_setup_wrb_frag(txo, busaddr, len);
1010 copied += len;
1011 }
1012
1013 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1014 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
1015 len = skb_frag_size(frag);
1016
1017 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1018 if (dma_mapping_error(dev, busaddr))
1019 goto dma_err;
1020 be_tx_setup_wrb_frag(txo, busaddr, len);
1021 copied += len;
1022 }
1023
1024 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1025
1026 be_tx_stats_update(txo, skb);
1027 return wrb_cnt;
1028
1029dma_err:
1030 adapter->drv_stats.dma_map_errors++;
1031 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +00001032 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001033}
1034
/* Non-zero once the QnQ async event has been received from FW
 * (returns the raw flag bit, not a normalized bool).
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
1039
/* Insert the VLAN tag(s) into the packet data in SW, as a workaround for
 * cases where HW VLAN tagging must be skipped. May return a new skb
 * (skb_share_check / vlan_insert_tag_set_proto can reallocate) or NULL
 * on allocation failure; callers must use the returned pointer.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     struct be_wrb_params
					     *wrb_params)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (skb_vlan_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the packet data; clear the in-skb tag */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
	}

	return skb;
}
1083
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001084static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1085{
1086 struct ethhdr *eh = (struct ethhdr *)skb->data;
1087 u16 offset = ETH_HLEN;
1088
1089 if (eh->h_proto == htons(ETH_P_IPV6)) {
1090 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1091
1092 offset += sizeof(struct ipv6hdr);
1093 if (ip6h->nexthdr != NEXTHDR_TCP &&
1094 ip6h->nexthdr != NEXTHDR_UDP) {
1095 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301096 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001097
1098 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1099 if (ehdr->hdrlen == 0xff)
1100 return true;
1101 }
1102 }
1103 return false;
1104}
1105
1106static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1107{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001108 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001109}
1110
Sathya Perla748b5392014-05-09 13:29:13 +05301111static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001112{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001113 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001114}
1115
/* Apply HW-erratum workarounds (padded-pkt trim, SW VLAN insertion,
 * ipv6 ext-hdr stall avoidance) before handing the pkt to HW.
 * Returns the (possibly reallocated) skb, or NULL if the pkt was
 * dropped/freed or reallocation failed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim off the pad bytes so IP tot_len matches skb->len */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1184
/* Top-level TX workaround dispatcher: pads tiny pkts, applies the
 * BEx/Lancer-specific fixups and bounds the pkt to what HW can handle.
 * Returns the (possibly reallocated) skb, or NULL if it was dropped.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1216
/* Notify HW of all pending WRBs on this TX queue: ensure the last request
 * is eventable, even up the WRB count with a dummy WRB if needed, and ring
 * the TX doorbell.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* patch the header WRB's num_wrb field to count the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1240
/* OS2BMC related: well-known UDP ports and BMC filter predicates used by
 * be_send_pkt_to_bmc() to decide whether an outgoing mc/bc packet must
 * also be forwarded to the BMC. The *_filt_enabled() macros test bits in
 * adapter->bmc_filt_mask.
 */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1294
/* Decide whether a TX packet must also be forwarded to the BMC over the
 * shared port (OS2BMC).  Only multicast/broadcast frames are candidates.
 * Returns true if the caller should enqueue the packet a second time for
 * the management controller; in that case *skb may be replaced by a copy
 * with the VLAN tag inserted in-line (see comment at 'done').
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	/* Plain mc/bc/ARP cases decided purely from the Ethernet header
	 * and the BMC filter mask.
	 */
	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 neighbour discovery: forward RA/NA frames when the
	 * corresponding BMC filter bit is set.
	 */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-relay traffic is matched by UDP
	 * destination port.
	 */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1364
/* Main transmit entry point (ndo_start_xmit).
 * Applies HW workarounds to the skb, enqueues its WRBs on the TX queue
 * selected by the skb's queue mapping, and optionally enqueues a second
 * copy for the BMC (OS2BMC).  The doorbell is rung via be_xmit_flush()
 * only when xmit_more is not set or the subqueue is stopped.
 * Always returns NETDEV_TX_OK; on error the skb is dropped, not requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	/* May linearize/copy or drop the skb; NULL means it was consumed */
	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* extra ref: the skb is now owned by two TX
			 * completions
			 */
			skb_get(skb);
	}

	/* Stop the subqueue before the TXQ can overflow; tx_stops is a
	 * driver statistic.
	 */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1415
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001416static inline bool be_in_all_promisc(struct be_adapter *adapter)
1417{
1418 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1419 BE_IF_FLAGS_ALL_PROMISCUOUS;
1420}
1421
1422static int be_set_vlan_promisc(struct be_adapter *adapter)
1423{
1424 struct device *dev = &adapter->pdev->dev;
1425 int status;
1426
1427 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1428 return 0;
1429
1430 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1431 if (!status) {
1432 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1433 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1434 } else {
1435 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1436 }
1437 return status;
1438}
1439
1440static int be_clear_vlan_promisc(struct be_adapter *adapter)
1441{
1442 struct device *dev = &adapter->pdev->dev;
1443 int status;
1444
1445 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1446 if (!status) {
1447 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1448 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1449 }
1450 return status;
1451}
1452
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Programs the current contents of adapter->vids into the HW VLAN filter.
 * All callers in this file invoke it with rx_filter_lock held.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	/* More VLANs than the filter can hold: use vlan-promisc instead */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Leaving vlan-promisc: must clear it before programming filters */
	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}
1491
Patrick McHardy80d5c362013-04-19 02:04:28 +00001492static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493{
1494 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001495 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001496
Sathya Perlab7172412016-07-27 05:26:18 -04001497 mutex_lock(&adapter->rx_filter_lock);
1498
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001499 /* Packets with VID 0 are always received by Lancer by default */
1500 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001501 goto done;
Vasundhara Volam48291c22014-03-11 18:53:08 +05301502
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301503 if (test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001504 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001505
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301506 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301507 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001508
Sathya Perlab7172412016-07-27 05:26:18 -04001509 status = be_vid_config(adapter);
1510done:
1511 mutex_unlock(&adapter->rx_filter_lock);
1512 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001513}
1514
Patrick McHardy80d5c362013-04-19 02:04:28 +00001515static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001516{
1517 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perlab7172412016-07-27 05:26:18 -04001518 int status = 0;
1519
1520 mutex_lock(&adapter->rx_filter_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001521
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001522 /* Packets with VID 0 are always received by Lancer by default */
1523 if (lancer_chip(adapter) && vid == 0)
Sathya Perlab7172412016-07-27 05:26:18 -04001524 goto done;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001525
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301526 if (!test_bit(vid, adapter->vids))
Sathya Perlab7172412016-07-27 05:26:18 -04001527 goto done;
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301528
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301529 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301530 adapter->vlans_added--;
1531
Sathya Perlab7172412016-07-27 05:26:18 -04001532 status = be_vid_config(adapter);
1533done:
1534 mutex_unlock(&adapter->rx_filter_lock);
1535 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001536}
1537
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001538static void be_set_all_promisc(struct be_adapter *adapter)
1539{
1540 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1541 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1542}
1543
1544static void be_set_mc_promisc(struct be_adapter *adapter)
1545{
1546 int status;
1547
1548 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1549 return;
1550
1551 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1552 if (!status)
1553 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1554}
1555
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001556static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001557{
1558 int status;
1559
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001560 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1561 return;
1562
1563 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001564 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001565 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1566}
1567
1568static void be_clear_uc_promisc(struct be_adapter *adapter)
1569{
1570 int status;
1571
1572 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1573 return;
1574
1575 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1576 if (!status)
1577 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1578}
1579
1580/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1581 * We use a single callback function for both sync and unsync. We really don't
1582 * add/remove addresses through this callback. But, we use it to detect changes
1583 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1584 */
1585static int be_uc_list_update(struct net_device *netdev,
1586 const unsigned char *addr)
1587{
1588 struct be_adapter *adapter = netdev_priv(netdev);
1589
1590 adapter->update_uc_list = true;
1591 return 0;
1592}
1593
1594static int be_mc_list_update(struct net_device *netdev,
1595 const unsigned char *addr)
1596{
1597 struct be_adapter *adapter = netdev_priv(netdev);
1598
1599 adapter->update_mc_list = true;
1600 return 0;
1601}
1602
/* Sync the netdev mc-list to HW.  __dev_mc_sync() is used only to detect
 * list changes (via be_mc_list_update); the full list is cached in
 * adapter->mc_list under the addr lock and then programmed outside it.
 * Falls back to MC-promisc when IFF_ALLMULTI is set, the list exceeds
 * be_max_mc(), or programming the filter fails.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool mc_promisc = false;
	int status;

	netif_addr_lock_bh(netdev);
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		/* In promisc mode the mc-list is irrelevant */
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (adapter->update_mc_list) {
		int i = 0;

		/* cache the mc-list in adapter */
		netdev_for_each_mc_addr(ha, netdev) {
			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
			i++;
		}
		adapter->mc_count = netdev_mc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			/* Programming the list failed: fall back */
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1653
1654static void be_clear_mc_list(struct be_adapter *adapter)
1655{
1656 struct net_device *netdev = adapter->netdev;
1657
1658 __dev_mc_unsync(netdev, NULL);
1659 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlab7172412016-07-27 05:26:18 -04001660 adapter->mc_count = 0;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001661}
1662
Suresh Reddy988d44b2016-09-07 19:57:52 +05301663static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1664{
Ivan Vecera1d0f1102017-01-06 20:30:02 +01001665 if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
Suresh Reddy988d44b2016-09-07 19:57:52 +05301666 adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1667 return 0;
1668 }
1669
Ivan Vecera1d0f1102017-01-06 20:30:02 +01001670 return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
Suresh Reddy988d44b2016-09-07 19:57:52 +05301671 adapter->if_handle,
1672 &adapter->pmac_id[uc_idx + 1], 0);
1673}
1674
1675static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1676{
1677 if (pmac_id == adapter->pmac_id[0])
1678 return;
1679
1680 be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1681}
1682
/* Sync the netdev uc-list to HW.  __dev_uc_sync() is used only to detect
 * list changes (via be_uc_list_update); the list is cached in
 * adapter->uc_list under the addr lock and programmed outside it.
 * One pmac slot is reserved for the primary MAC, hence the
 * "be_max_uc() - 1" limit; beyond that we fall back to uc-promisc.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int curr_uc_macs = 0, i;

	netif_addr_lock_bh(netdev);
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		/* In promisc mode the uc-list is irrelevant */
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	}  else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (adapter->update_uc_list) {
		/* cache the uc-list in adapter array */
		i = 0;
		netdev_for_each_uc_addr(ha, netdev) {
			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
			i++;
		}
		curr_uc_macs = netdev_uc_count(netdev);
	}
	netif_addr_unlock_bh(netdev);

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		/* delete the previously programmed entries, then add the
		 * current cached list (pmac_id[0] is the primary MAC)
		 */
		for (i = 0; i < adapter->uc_macs; i++)
			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);

		for (i = 0; i < curr_uc_macs; i++)
			be_uc_mac_add(adapter, i);
		adapter->uc_macs = curr_uc_macs;
		adapter->update_uc_list = false;
	}
}
1730
1731static void be_clear_uc_list(struct be_adapter *adapter)
1732{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001733 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001734 int i;
1735
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001736 __dev_uc_unsync(netdev, NULL);
Sathya Perlab7172412016-07-27 05:26:18 -04001737 for (i = 0; i < adapter->uc_macs; i++)
Suresh Reddy988d44b2016-09-07 19:57:52 +05301738 be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1739
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001740 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301741}
1742
/* Re-program all RX filters (all-promisc, VLAN, uc-list, mc-list) to
 * match the current netdev flags and address lists.  Serialized by
 * rx_filter_lock; also invoked from the be_work_set_rx_mode work item.
 */
static void __be_set_rx_mode(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	mutex_lock(&adapter->rx_filter_lock);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);

	mutex_unlock(&adapter->rx_filter_lock);
}
1765
1766static void be_work_set_rx_mode(struct work_struct *work)
1767{
1768 struct be_cmd_work *cmd_work =
1769 container_of(work, struct be_cmd_work, work);
1770
1771 __be_set_rx_mode(cmd_work->adapter);
1772 kfree(cmd_work);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001773}
1774
/* ndo_set_vf_mac: set the MAC address of VF @vf.
 * No-op when the requested MAC equals the one already cached.  On BEx
 * chips the old pmac entry is deleted and a new one added; on newer
 * chips be_cmd_set_mac() programs it in one step.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the active MAC only after the FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1814
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001815static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301816 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001817{
1818 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001819 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001820
Sathya Perla11ac75e2011-12-13 00:58:50 +00001821 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001822 return -EPERM;
1823
Sathya Perla11ac75e2011-12-13 00:58:50 +00001824 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001825 return -EINVAL;
1826
1827 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001828 vi->max_tx_rate = vf_cfg->tx_rate;
1829 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001830 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1831 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001832 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301833 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001834 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001835
1836 return 0;
1837}
1838
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * Any VLAN filters the VF had programmed are cleared, and its FILTMGMT
 * privilege is revoked so it cannot program new ones while TVT is on.
 * Failures of the clear/privilege steps are not treated as fatal.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1867
/* Disable Transparent VLAN Tagging on VF @vf and give back its ability
 * to program VLAN filters (FILTMGMT privilege).  The VM must bounce its
 * interface for the change to fully take effect (see dev_info below).
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1894
/* ndo_set_vf_vlan: enable/disable transparent VLAN tagging for a VF.
 * A non-zero vlan/qos enables TVT with the combined tag; vlan == 0 and
 * qos == 0 disables it.  Only 802.1Q tagging is supported.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
			  __be16 vlan_proto)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan || qos) {
		/* Fold the priority bits into the tag */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	/* Cache the tag only after the FW accepted it */
	vf_cfg->vlan_tag = vlan;
	return 0;
}
1928
/* ndo_set_vf_rate: cap the TX rate of VF @vf at @max_tx_rate Mbps.
 * min_tx_rate is not supported.  max_tx_rate == 0 removes the cap
 * (skips validation, goes straight to config_qos).  Otherwise the rate
 * must lie between 100 Mbps and the current link speed and, on Skyhawk,
 * be a multiple of 1% of link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the rate only after the FW accepted it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301990
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301991static int be_set_vf_link_state(struct net_device *netdev, int vf,
1992 int link_state)
1993{
1994 struct be_adapter *adapter = netdev_priv(netdev);
1995 int status;
1996
1997 if (!sriov_enabled(adapter))
1998 return -EPERM;
1999
2000 if (vf >= adapter->num_vfs)
2001 return -EINVAL;
2002
2003 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05302004 if (status) {
2005 dev_err(&adapter->pdev->dev,
2006 "Link state change on VF %d failed: %#x\n", vf, status);
2007 return be_cmd_status(status);
2008 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302009
Kalesh APabccf232014-07-17 16:20:24 +05302010 adapter->vf_cfg[vf].plink_tracking = link_state;
2011
2012 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05302013}
Ajit Khapardee1d18732010-07-23 01:52:13 +00002014
Kalesh APe7bcbd72015-05-06 05:30:32 -04002015static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2016{
2017 struct be_adapter *adapter = netdev_priv(netdev);
2018 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2019 u8 spoofchk;
2020 int status;
2021
2022 if (!sriov_enabled(adapter))
2023 return -EPERM;
2024
2025 if (vf >= adapter->num_vfs)
2026 return -EINVAL;
2027
2028 if (BEx_chip(adapter))
2029 return -EOPNOTSUPP;
2030
2031 if (enable == vf_cfg->spoofchk)
2032 return 0;
2033
2034 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2035
2036 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2037 0, spoofchk);
2038 if (status) {
2039 dev_err(&adapter->pdev->dev,
2040 "Spoofchk change on VF %d failed: %#x\n", vf, status);
2041 return be_cmd_status(status);
2042 }
2043
2044 vf_cfg->spoofchk = enable;
2045 return 0;
2046}
2047
Sathya Perla2632baf2013-10-01 16:00:00 +05302048static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2049 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002050{
Sathya Perla2632baf2013-10-01 16:00:00 +05302051 aic->rx_pkts_prev = rx_pkts;
2052 aic->tx_reqs_prev = tx_pkts;
2053 aic->jiffies = now;
2054}
Sathya Perlaac124ff2011-07-25 19:10:14 +00002055
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002056static int be_get_new_eqd(struct be_eq_obj *eqo)
Sathya Perla2632baf2013-10-01 16:00:00 +05302057{
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002058 struct be_adapter *adapter = eqo->adapter;
2059 int eqd, start;
Sathya Perla2632baf2013-10-01 16:00:00 +05302060 struct be_aic_obj *aic;
Sathya Perla2632baf2013-10-01 16:00:00 +05302061 struct be_rx_obj *rxo;
2062 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002063 u64 rx_pkts = 0, tx_pkts = 0;
Sathya Perla2632baf2013-10-01 16:00:00 +05302064 ulong now;
2065 u32 pps, delta;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002066 int i;
2067
2068 aic = &adapter->aic_obj[eqo->idx];
2069 if (!aic->enable) {
2070 if (aic->jiffies)
2071 aic->jiffies = 0;
2072 eqd = aic->et_eqd;
2073 return eqd;
2074 }
2075
2076 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2077 do {
2078 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2079 rx_pkts += rxo->stats.rx_pkts;
2080 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
2081 }
2082
2083 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2084 do {
2085 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2086 tx_pkts += txo->stats.tx_reqs;
2087 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
2088 }
2089
2090 /* Skip, if wrapped around or first calculation */
2091 now = jiffies;
2092 if (!aic->jiffies || time_before(now, aic->jiffies) ||
2093 rx_pkts < aic->rx_pkts_prev ||
2094 tx_pkts < aic->tx_reqs_prev) {
2095 be_aic_update(aic, rx_pkts, tx_pkts, now);
2096 return aic->prev_eqd;
2097 }
2098
2099 delta = jiffies_to_msecs(now - aic->jiffies);
2100 if (delta == 0)
2101 return aic->prev_eqd;
2102
2103 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2104 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2105 eqd = (pps / 15000) << 2;
2106
2107 if (eqd < 8)
2108 eqd = 0;
2109 eqd = min_t(u32, eqd, aic->max_eqd);
2110 eqd = max_t(u32, eqd, aic->min_eqd);
2111
2112 be_aic_update(aic, rx_pkts, tx_pkts, now);
2113
2114 return eqd;
2115}
2116
2117/* For Skyhawk-R only */
2118static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2119{
2120 struct be_adapter *adapter = eqo->adapter;
2121 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2122 ulong now = jiffies;
2123 int eqd;
2124 u32 mult_enc;
2125
2126 if (!aic->enable)
2127 return 0;
2128
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302129 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002130 eqd = aic->prev_eqd;
2131 else
2132 eqd = be_get_new_eqd(eqo);
2133
2134 if (eqd > 100)
2135 mult_enc = R2I_DLY_ENC_1;
2136 else if (eqd > 60)
2137 mult_enc = R2I_DLY_ENC_2;
2138 else if (eqd > 20)
2139 mult_enc = R2I_DLY_ENC_3;
2140 else
2141 mult_enc = R2I_DLY_ENC_0;
2142
2143 aic->prev_eqd = eqd;
2144
2145 return mult_enc;
2146}
2147
2148void be_eqd_update(struct be_adapter *adapter, bool force_update)
2149{
2150 struct be_set_eqd set_eqd[MAX_EVT_QS];
2151 struct be_aic_obj *aic;
2152 struct be_eq_obj *eqo;
2153 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002154
Sathya Perla2632baf2013-10-01 16:00:00 +05302155 for_all_evt_queues(adapter, eqo, i) {
2156 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002157 eqd = be_get_new_eqd(eqo);
2158 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05302159 set_eqd[num].delay_multiplier = (eqd * 65)/100;
2160 set_eqd[num].eq_id = eqo->q.id;
2161 aic->prev_eqd = eqd;
2162 num++;
2163 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00002164 }
Sathya Perla2632baf2013-10-01 16:00:00 +05302165
2166 if (num)
2167 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07002168}
2169
Sathya Perla3abcded2010-10-03 22:12:27 -07002170static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05302171 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07002172{
Sathya Perlaac124ff2011-07-25 19:10:14 +00002173 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07002174
Sathya Perlaab1594e2011-07-25 19:10:15 +00002175 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07002176 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002177 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07002178 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05302179 if (rxcp->tunneled)
2180 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002181 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07002182 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00002183 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00002184 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00002185 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002186}
2187
Sathya Perla2e588f82011-03-11 02:49:26 +00002188static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002189{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002190 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302191 * Also ignore ipcksm for ipv6 pkts
2192 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002193 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302194 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002195}
2196
/* Pop the RX page-info entry at the RXQ tail and make its data visible
 * to the CPU: unmap the whole big page when this is its last frag,
 * otherwise just DMA-sync this frag's bytes. Advances the queue tail
 * and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the big page: drop the whole mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Other frags of this page are still posted; only sync
		 * this frag for CPU access
		 */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2222
2223/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002224static void be_rx_compl_discard(struct be_rx_obj *rxo,
2225 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002226{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002227 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002228 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002229
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002230 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302231 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002232 put_page(page_info->page);
2233 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234 }
2235}
2236
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first BE_HDR_LEN (or whole tiny packet) is
 * copied into the linear area, the rest is attached as page frags,
 * coalescing consecutive frags from the same physical page.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the payload stays in the page as frag 0
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* Single-frag packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref
			 * taken when the frag was posted
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2311
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted RX frags, set offload
 * metadata (csum, rss hash, vlan) and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: count the drop and recycle the posted frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level 1 for a validated inner (tunneled) checksum */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2347
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted RX frags directly to napi's GRO skb (no copy),
 * coalescing consecutive frags from the same physical page.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at (u16)-1 and is bumped to 0 on the first iteration */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: drop the extra ref taken at post time */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for HW-validated checksums */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* csum_level 1 for a validated inner (tunneled) checksum */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2404
/* Decode a v1-format RX completion entry (BE3-native and later) into
 * the driver's chip-independent be_rx_compl_info.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002427
/* Decode a v0-format RX completion entry (pre BE3-native) into the
 * driver's chip-independent be_rx_compl_info. Unlike v1, this format
 * carries an ip_frag bit instead of tunnel info.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2449
/* Fetch and parse the next valid RX completion from this queue's CQ,
 * or return NULL when none is pending. The entry's valid bit is
 * cleared so it is consumed exactly once.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit was seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW L4 csum of a fragmented IP pkt is not meaningful */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the tag if it is the pvid and not a configured vid */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2494
Eric Dumazet1829b082011-03-01 05:48:12 +00002495static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002496{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002497 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002498
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002499 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002500 gfp |= __GFP_COMP;
2501 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002502}
2503
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. The big page is DMA-mapped once and shared by
 * several RXQ entries; posting stops early on alloc/map failure or when
 * a still-populated table entry is reached.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next frag of the same page: one page ref per frag */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with this frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in batches of at most
		 * MAX_NUM_POST_ERX_DB frags
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2586
/* Fetch the next valid TX completion from this queue's CQ, or return
 * NULL when none is pending. The entry's valid dword is cleared so it
 * is consumed exactly once.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Consume the entry: clear valid and advance the CQ tail */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2607
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * each WRB's DMA buffer and freeing the completed skbs. Returns the
 * number of WRBs consumed so the caller can credit the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		/* A non-NULL sent_skbs[] slot marks the hdr WRB of a new
		 * request; the previous request's skb is now fully
		 * unmapped and can be freed
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data WRB may carry the linear header bytes */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2642
/* Count and consume the valid entries pending in the event queue.
 * Each consumed entry's evt word is zeroed so it is not counted again
 * on a subsequent call. Returns the number of events consumed.
 */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Order the read of the valid (evt) word before clearing it */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2662
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002663/* Leaves the EQ is disarmed state */
2664static void be_eq_clean(struct be_eq_obj *eqo)
2665{
2666 int num = events_get(eqo);
2667
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002668 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002669}
2670
/* Free posted rx buffers that were not used.
 * Releases the page refs still held by the RX ring and resets the ring
 * indices so the queue can be reused.
 */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	/* get_rx_page_info() decrements rxq->used; it must reach exactly 0 */
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2686
/* Drain the RX completion queue during queue teardown */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms, or immediately if the HW is
			 * already in an error state
			 */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2726
/* Drain TX completions from HW on all TX queues, then free any TX wrbs
 * that were enqueued but never notified to HW, rewinding the ring indices
 * for those. Used during interface close/cleanup.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW still responding: restart silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2793
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002794static void be_evt_queues_destroy(struct be_adapter *adapter)
2795{
2796 struct be_eq_obj *eqo;
2797 int i;
2798
2799 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002800 if (eqo->q.created) {
2801 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002802 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302803 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002804 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002805 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002806 be_queue_free(adapter, &eqo->q);
2807 }
2808}
2809
/* Allocate and create the event queues, one per IRQ vector in use, and set
 * up each EQ's NAPI context, adaptive-coalescing state and CPU affinity
 * hint. Returns 0 on success or a negative errno; on failure the caller is
 * expected to tear down via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* need enough EQs to service both RX and TX queues */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    max(adapter->cfg_num_rx_irqs,
					adapter->cfg_num_tx_irqs));

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		/* Spread EQ affinity hints across CPUs local to the device */
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2850
Sathya Perla5fb379e2009-06-18 00:02:59 +00002851static void be_mcc_queues_destroy(struct be_adapter *adapter)
2852{
2853 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002854
Sathya Perla8788fdc2009-07-27 22:52:03 +00002855 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002856 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002857 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002858 be_queue_free(adapter, q);
2859
Sathya Perla8788fdc2009-07-27 22:52:03 +00002860 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002861 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002862 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002863 be_queue_free(adapter, q);
2864}
2865
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates the MCC completion queue and MCC queue and creates both in HW,
 * unwinding via the goto chain on any failure. Returns 0 on success, -1 on
 * failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2898
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002899static void be_tx_queues_destroy(struct be_adapter *adapter)
2900{
2901 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002902 struct be_tx_obj *txo;
2903 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002904
Sathya Perla3c8def92011-06-12 20:01:58 +00002905 for_all_tx_queues(adapter, txo, i) {
2906 q = &txo->q;
2907 if (q->created)
2908 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2909 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002910
Sathya Perla3c8def92011-06-12 20:01:58 +00002911 q = &txo->cq;
2912 if (q->created)
2913 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2914 be_queue_free(adapter, q);
2915 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002916}
2917
/* Allocate and create all TX queues and their completion queues, bind each
 * TXQ's CQ to an EQ (round-robin when there are fewer EQs than TXQs) and
 * set the XPS mapping to match the EQ's affinity. Returns 0 or a negative
 * status; partial creations are cleaned up by the caller.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits for this queue to the CPUs of its EQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2962
2963static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002964{
2965 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002966 struct be_rx_obj *rxo;
2967 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002968
Sathya Perla3abcded2010-10-03 22:12:27 -07002969 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002970 q = &rxo->cq;
2971 if (q->created)
2972 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2973 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002974 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002975}
2976
/* Decide how many RX queues to use (RSS rings plus an optional default
 * RXQ, with a minimum of one) and allocate/create a completion queue for
 * each, distributing the CQs across the event queues round-robin.
 * Returns 0 or a negative status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
3018
/* INTx interrupt handler: count pending events, schedule NAPI, and tell
 * the kernel whether the interrupt looked genuine (see spurious handling
 * below).
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
3050
/* MSI-x interrupt handler: clear the interrupt (0 events reported, EQ left
 * unarmed) and defer all event processing to NAPI (be_poll).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
3059
Sathya Perla2e588f82011-03-11 02:49:26 +00003060static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003061{
Somnath Koture38b1702013-05-29 22:55:56 +00003062 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003063}
3064
/* Process up to @budget RX completions on @rxo, delivering packets via GRO
 * or the regular path, and replenish RX buffers when the ring runs low.
 * @polling distinguishes NAPI polling from busy-polling (GRO is skipped
 * for the latter). Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3124
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303125static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303126{
3127 switch (status) {
3128 case BE_TX_COMP_HDR_PARSE_ERR:
3129 tx_stats(txo)->tx_hdr_parse_err++;
3130 break;
3131 case BE_TX_COMP_NDMA_ERR:
3132 tx_stats(txo)->tx_dma_err++;
3133 break;
3134 case BE_TX_COMP_ACL_ERR:
3135 tx_stats(txo)->tx_spoof_check_err++;
3136 break;
3137 }
3138}
3139
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303140static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303141{
3142 switch (status) {
3143 case LANCER_TX_COMP_LSO_ERR:
3144 tx_stats(txo)->tx_tso_err++;
3145 break;
3146 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3147 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3148 tx_stats(txo)->tx_spoof_check_err++;
3149 break;
3150 case LANCER_TX_COMP_QINQ_ERR:
3151 tx_stats(txo)->tx_qinq_err++;
3152 break;
3153 case LANCER_TX_COMP_PARITY_ERR:
3154 tx_stats(txo)->tx_internal_parity_err++;
3155 break;
3156 case LANCER_TX_COMP_DMA_ERR:
3157 tx_stats(txo)->tx_dma_err++;
3158 break;
3159 }
3160}
3161
/* Reap all pending TX completions on @txo, free/unmap the finished wrbs,
 * record any completion error stats, notify the CQ and wake the netdev
 * subqueue @idx if it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003196
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Per-EQ state lock used to arbitrate between NAPI and busy-poll
 * processing of the same queues. The stubs in the #else branch make the
 * NAPI path unconditional when busy-polling is compiled out.
 */

/* Claim the EQ for NAPI processing; returns false if busy-poll holds it
 * (the yield is recorded in eqo->state).
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Claim the EQ for busy-poll processing; returns false if NAPI holds it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-poll processing */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock/state */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until any in-flight busy-poll on this EQ has finished */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3296
/* NAPI poll handler for one EQ: reap TX completions on all TXQs bound to
 * this EQ, process RX completions within @budget (unless busy-poll holds
 * the EQ), service MCC on the MCC EQ, and either complete NAPI and re-arm
 * the EQ or stay in polling mode. Returns the RX work done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ: report full budget so NAPI repolls */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete_done(napi, max_work);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3345
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll (low-latency sockets) handler: polls up to 4 RX completions
 * from the RXQs on this EQ. Returns LL_FLUSH_BUSY when NAPI currently owns
 * the rings; otherwise returns the packet count from the first RXQ that
 * produced any work (0 if none did).
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3367
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003368void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003369{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003370 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3371 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003372 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303373 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003374
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303375 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003376 return;
3377
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003378 if (lancer_chip(adapter)) {
3379 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3380 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303381 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003382 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303383 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003384 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303385 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303386 /* Do not log error messages if its a FW reset */
3387 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3388 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3389 dev_info(dev, "Firmware update in progress\n");
3390 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303391 dev_err(dev, "Error detected in the card\n");
3392 dev_err(dev, "ERR: sliport status 0x%x\n",
3393 sliport_status);
3394 dev_err(dev, "ERR: sliport error1 0x%x\n",
3395 sliport_err1);
3396 dev_err(dev, "ERR: sliport error2 0x%x\n",
3397 sliport_err2);
3398 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003399 }
3400 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003401 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3402 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3403 ue_lo_mask = ioread32(adapter->pcicfg +
3404 PCICFG_UE_STATUS_LOW_MASK);
3405 ue_hi_mask = ioread32(adapter->pcicfg +
3406 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003407
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003408 ue_lo = (ue_lo & ~ue_lo_mask);
3409 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003410
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303411 /* On certain platforms BE hardware can indicate spurious UEs.
3412 * Allow HW to stop working completely in case of a real UE.
3413 * Hence not setting the hw_error for UE detection.
3414 */
3415
3416 if (ue_lo || ue_hi) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303417 dev_err(dev, "Error detected in the adapter");
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303418 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303419 be_set_error(adapter, BE_ERROR_UE);
3420
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303421 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3422 if (ue_lo & 1)
3423 dev_err(dev, "UE: %s bit set\n",
3424 ue_status_low_desc[i]);
3425 }
3426 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3427 if (ue_hi & 1)
3428 dev_err(dev, "UE: %s bit set\n",
3429 ue_status_hi_desc[i]);
3430 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303431 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003432 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003433}
3434
Sathya Perla8d56ff12009-11-22 22:02:26 +00003435static void be_msix_disable(struct be_adapter *adapter)
3436{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003437 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003438 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003439 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303440 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003441 }
3442}
3443
/* Enable MSI-X with as many vectors as HW/FW and the CPU count allow.
 *
 * When RoCE is supported, vectors are requested for the NIC plus up to one
 * RoCE EQ per online CPU (bounded by the function's EQ budget); otherwise
 * just the larger of the configured RX/TX IRQ counts.
 * pci_enable_msix_range() may grant fewer vectors (min MIN_MSIX_VECTORS);
 * when more than the minimum are granted with RoCE, half go to RoCE and
 * the remainder to the NIC.
 *
 * Returns 0 on success. On failure, returns 0 for PFs (INTx fallback is
 * still possible) but the negative error for VFs, which lack INTx support.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	unsigned int i, max_roce_eqs;
	struct device *dev = &adapter->pdev->dev;
	int num_vec;

	/* If RoCE is supported, program the max number of vectors that
	 * could be used for NIC and RoCE, else, just program the number
	 * we'll use initially.
	 */
	if (be_roce_supported(adapter)) {
		max_roce_eqs =
			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
	} else {
		num_vec = max(adapter->cfg_num_rx_irqs,
			      adapter->cfg_num_tx_irqs);
	}

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Split granted vectors between RoCE and NIC */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3492
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003493static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303494 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003495{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303496 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003497}
3498
/* Request one IRQ per event queue and set each vector's CPU affinity hint.
 * On a request_irq() failure, the IRQs acquired so far are freed in reverse
 * order and MSI-X is disabled before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Name shown in /proc/interrupts, e.g. "eth0-q0" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Unwind only the vectors that were successfully requested */
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3526
/* Register interrupts: MSI-X first; PFs fall back to shared INTx on the
 * first EQ, while VFs propagate the MSI-X error since INTx is unsupported
 * for them.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3554
3555static void be_irq_unregister(struct be_adapter *adapter)
3556{
3557 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003558 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003559 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003560
3561 if (!adapter->isr_registered)
3562 return;
3563
3564 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003565 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003566 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003567 goto done;
3568 }
3569
3570 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003571 for_all_evt_queues(adapter, eqo, i) {
3572 vec = be_msix_vec_get(adapter, eqo);
3573 irq_set_affinity_hint(vec, NULL);
3574 free_irq(vec, eqo);
3575 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003576
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003577done:
3578 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003579}
3580
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003581static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003582{
Ajit Khaparde62219062016-02-10 22:45:53 +05303583 struct rss_info *rss = &adapter->rss_info;
Sathya Perla482c9e72011-06-29 23:33:17 +00003584 struct be_queue_info *q;
3585 struct be_rx_obj *rxo;
3586 int i;
3587
3588 for_all_rx_queues(adapter, rxo, i) {
3589 q = &rxo->q;
3590 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003591 /* If RXQs are destroyed while in an "out of buffer"
3592 * state, there is a possibility of an HW stall on
3593 * Lancer. So, post 64 buffers to each queue to relieve
3594 * the "out of buffer" condition.
3595 * Make sure there's space in the RXQ before posting.
3596 */
3597 if (lancer_chip(adapter)) {
3598 be_rx_cq_clean(rxo);
3599 if (atomic_read(&q->used) == 0)
3600 be_post_rx_frags(rxo, GFP_KERNEL,
3601 MAX_RX_POST);
3602 }
3603
Sathya Perla482c9e72011-06-29 23:33:17 +00003604 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003605 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003606 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003607 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003608 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003609 }
Ajit Khaparde62219062016-02-10 22:45:53 +05303610
3611 if (rss->rss_flags) {
3612 rss->rss_flags = RSS_ENABLE_NONE;
3613 be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3614 128, rss->rss_hkey);
3615 }
Sathya Perla482c9e72011-06-29 23:33:17 +00003616}
3617
/* Remove the interface's RX filters on the close/teardown path: the device
 * MAC (when allowed), the UC and MC lists, and - on Lancer only - the
 * IFACE RX-filter flags (see FW-bug comment below).
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
		be_dev_mac_del(adapter, adapter->pmac_id[0]);
		/* Zero dev_mac so be_enable_if_filters() re-programs it */
		eth_zero_addr(adapter->dev_mac);
	}

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3649
/* ndo_stop handler: quiesce the interface.
 * Flushes pending config work, disables RX filters, NAPI/busy-poll and
 * async MCC events, stops TX and drains its completions, destroys the RX
 * queues, then syncs and cleans each EQ before releasing the IRQs.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	/* Before attempting cleanup ensure all the pending cmds in the
	 * config_wq have finished execution
	 */
	flush_workqueue(be_wq);

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no in-flight interrupt handler still references
		 * the EQ before cleaning it
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3699
/* Allocate and create all RX queues, then configure RSS.
 *
 * A default (non-RSS) RXQ is created only when required; the rest are RSS
 * queues. With multiple RXQs the RSS indirection table is filled
 * round-robin with the RSS queue ids, a random hash key is programmed
 * (UDP RSS added on non-BEx chips), and each RXQ is seeded with
 * RX_Q_LEN - 1 buffers. Returns 0 or an error status.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the default RXQ (not part of the RSS set) if needed */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table round-robin with RSS queue ids */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		/* Remember the programmed key for later reporting/teardown */
		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3770
/* Re-install RX filters on the open path: the basic IFACE filter flags,
 * the device MAC (re-programming it when it differs from netdev's address),
 * the VLAN table, and the UC/MC/promisc RX mode.
 * Returns 0 or an error status from the first failing FW command.
 */
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* Normally this condition usually true as the ->dev_mac is zeroed.
	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
	 * subsequent be_dev_mac_add() can fail (after fresh boot)
	 */
	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
		int old_pmac_id = -1;

		/* Remember old programmed MAC if any - can happen on BE3 VF */
		if (!is_zero_ether_addr(adapter->dev_mac))
			old_pmac_id = adapter->pmac_id[0];

		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
		if (status)
			return status;

		/* Delete the old programmed MAC as we successfully programmed
		 * a new MAC
		 */
		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
			be_dev_mac_del(adapter, old_pmac_id);

		/* Track what is actually programmed in HW */
		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	__be_set_rx_mode(adapter);

	return 0;
}
3810
/* ndo_open handler: bring the interface up.
 * Creates RX queues, enables RX filters, registers IRQs, arms all RX/TX
 * CQs, enables async MCC events, NAPI/busy-poll and each EQ, queries the
 * link state, starts the TX queues and (on Skyhawk) requests re-play of
 * tunnel-port notifications for VxLAN offload.
 * Any failure tears down partial state via be_close() and returns -EIO.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3860
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003861static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3862{
3863 u32 addr;
3864
3865 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3866
3867 mac[5] = (u8)(addr & 0xFF);
3868 mac[4] = (u8)((addr >> 8) & 0xFF);
3869 mac[3] = (u8)((addr >> 16) & 0xFF);
3870 /* Use the OUI from the current MAC address */
3871 memcpy(mac, adapter->netdev->dev_addr, 3);
3872}
3873
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs the MAC via PMAC add; newer chips via SET_MAC */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the previous MAC + 1 (low byte only) */
		mac[5] += 1;
	}
	/* NOTE(review): only the *last* VF's status is returned; a failure
	 * for an earlier VF is logged but overwritten by a later success.
	 */
	return status;
}
3909
Sathya Perla4c876612013-02-03 20:30:11 +00003910static int be_vfs_mac_query(struct be_adapter *adapter)
3911{
3912 int status, vf;
3913 u8 mac[ETH_ALEN];
3914 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003915
3916 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303917 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3918 mac, vf_cfg->if_handle,
3919 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003920 if (status)
3921 return status;
3922 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3923 }
3924 return 0;
3925}
3926
/* Undo SR-IOV setup. If any VF is still assigned to a VM, SR-IOV is left
 * enabled (only the per-VF config is freed). Otherwise SR-IOV is disabled,
 * each VF's MAC and IFACE are removed, and on BE3 the port-forwarding mode
 * is restored to pass-through.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		/* Can't disable SR-IOV while guests still own VFs */
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the MAC via PMAC del; newer chips via SET_MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3960
/* Destroy all queue resources: MCC queues, RX CQs, TX queues, and finally
 * the event queues (destroyed last; the other queues are bound to them).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3968
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303969static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003970{
Sathya Perla191eb752012-02-23 18:50:13 +00003971 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3972 cancel_delayed_work_sync(&adapter->work);
3973 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3974 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303975}
3976
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003977static void be_cancel_err_detection(struct be_adapter *adapter)
3978{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303979 struct be_error_recovery *err_rec = &adapter->error_recovery;
3980
3981 if (!be_err_recovery_workq)
3982 return;
3983
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003984 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05303985 cancel_delayed_work_sync(&err_rec->err_detection_work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003986 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3987 }
3988}
3989
/* Tear down VxLAN offload state: revert the tunnel IFACE back to normal
 * mode, clear the VxLAN port programmed in FW, and strip the
 * tunnel-offload feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
4008
/* Compute the per-VF resource template (@vft_res) used when provisioning
 * SRIOV via SET_PROFILE_CONFIG: queues, MACs, VLANs, ifaces and MCCQs from
 * the PF-pool (adapter->pool_res) are split between the PF and @num_vfs
 * VFs. Only fields that FW reports as modifiable are filled in.
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and it's VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than it's PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and it's VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
4084
Sathya Perlab7172412016-07-27 05:26:18 -04004085static void be_if_destroy(struct be_adapter *adapter)
4086{
4087 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
4088
4089 kfree(adapter->pmac_id);
4090 adapter->pmac_id = NULL;
4091
4092 kfree(adapter->mc_list);
4093 adapter->mc_list = NULL;
4094
4095 kfree(adapter->uc_list);
4096 adapter->uc_list = NULL;
4097}
4098
/* Tear down everything set up by be_setup(): stop the worker, clear VFs,
 * disable VxLAN offloads, destroy the default iface and all queues, and
 * release MSI-X. On Skyhawk PFs whose VFs are not assigned to guests,
 * also re-writes the SRIOV resource distribution in FW for the maximum
 * supported VF count. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	flush_workqueue(be_wq);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);

	be_if_destroy(adapter);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
4134
/* Create an iface for each VF via proxy if_create calls (domain vf + 1).
 * On non-BE3 chips the per-VF capability flags come from the FW profile
 * (when available), with VLAN promiscuous mode explicitly removed.
 * Returns 0 on success or the first failing command status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4170
Sathya Perla39f1d942012-05-08 19:41:24 +00004171static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004172{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004173 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004174 int vf;
4175
Sathya Perla39f1d942012-05-08 19:41:24 +00004176 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4177 GFP_KERNEL);
4178 if (!adapter->vf_cfg)
4179 return -ENOMEM;
4180
Sathya Perla11ac75e2011-12-13 00:58:50 +00004181 for_all_vfs(adapter, vf_cfg, vf) {
4182 vf_cfg->if_handle = -1;
4183 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004184 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004185 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004186}
4187
/* Enable and configure SR-IOV VFs. If VFs were left enabled by a previous
 * driver load (old_vfs != 0), their iface handles and MACs are re-queried
 * instead of being created afresh. Then, for each VF: grant the FILTMGMT
 * privilege when possible, allow full bandwidth and set auto link-state
 * (new VFs only), and cache the FW's spoofchk setting. For new VFs,
 * SR-IOV is finally enabled in PCI config space; on BE3, VEB forwarding
 * is turned on. On any failure, be_vf_clear() undoes the partial setup.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4280
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304281/* Converting function_mode bits on BE3 to SH mc_type enums */
4282
4283static u8 be_convert_mc_type(u32 function_mode)
4284{
Suresh Reddy66064db2014-06-23 16:41:29 +05304285 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304286 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304287 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304288 return FLEX10;
4289 else if (function_mode & VNIC_MODE)
4290 return vNIC2;
4291 else if (function_mode & UMC_ENABLED)
4292 return UMC;
4293 else
4294 return MC_NONE;
4295}
4296
/* On BE2/BE3 FW does not suggest the supported limits, so derive the
 * per-function resource limits in the driver, based on chip type,
 * multi-channel mode, SR-IOV state and the function capability flags.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4368
Sathya Perla30128032011-11-10 19:17:57 +00004369static void be_setup_init(struct be_adapter *adapter)
4370{
4371 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004372 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004373 adapter->if_handle = -1;
4374 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004375 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304376 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004377 if (be_physfn(adapter))
4378 adapter->cmd_privileges = MAX_PRIVILEGES;
4379 else
4380 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004381}
4382
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * no: of VFs for which RSS can be enabled.
 */
static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* NOTE(review): presumably one table is reserved per NIC PF on the
	 * port, hence the subtraction of nic_pfs — confirm against SLI spec.
	 */
	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4407
/* Read the SRIOV PF-pool resource limits from the FW profile into
 * adapter->pool_res, working around old BE3 FW that does not report
 * max_vfs, and preserving the counts of VFs already enabled by a
 * previous driver load. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4446
/* At probe time, read the SRIOV config from FW, cap the PCI TotalVFs
 * value to the FW limit via pci_sriov_set_totalvfs(), and on Skyhawk
 * (when no VFs are already enabled) hand the whole PF-pool to the PF by
 * provisioning for 0 VFs. A provisioning failure is logged, not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4472
/* Establish the adapter's resource limits (adapter->res): hard-coded for
 * BEx chips, queried from FW otherwise. Also reserves half the EQs for
 * RoCE when supported, decides whether a separate non-RSS default RXQ is
 * needed, and sizes the initial RX/TX IRQ configuration (in pairs).
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4522
/* Read static configuration from FW at init time: controller attributes,
 * FW config, FAT dump length (non-Lancer PFs), FW log level (BEx chips,
 * used to seed msg_enable), ACPI WoL capability (also arming PCI D3
 * wake accordingly), port name, and the active profile id (PFs only).
 * Returns 0 on success or the failing command's status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4560
Sathya Perla95046b92013-07-23 15:25:02 +05304561static int be_mac_setup(struct be_adapter *adapter)
4562{
4563 u8 mac[ETH_ALEN];
4564 int status;
4565
4566 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4567 status = be_cmd_get_perm_mac(adapter, mac);
4568 if (status)
4569 return status;
4570
4571 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4572 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Ivan Vecera4993b392017-01-31 20:01:31 +01004573
4574 /* Initial MAC for BE3 VFs is already programmed by PF */
4575 if (BEx_chip(adapter) && be_virtfn(adapter))
4576 memcpy(adapter->dev_mac, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304577 }
4578
Sathya Perla95046b92013-07-23 15:25:02 +05304579 return 0;
4580}
4581
/* Arm the periodic worker task (1 second period) on the driver workqueue
 * and record that it is scheduled.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4587
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304588static void be_destroy_err_recovery_workq(void)
4589{
4590 if (!be_err_recovery_workq)
4591 return;
4592
4593 flush_workqueue(be_err_recovery_workq);
4594 destroy_workqueue(be_err_recovery_workq);
4595 be_err_recovery_workq = NULL;
4596}
4597
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304598static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004599{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304600 struct be_error_recovery *err_rec = &adapter->error_recovery;
4601
4602 if (!be_err_recovery_workq)
4603 return;
4604
4605 queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4606 msecs_to_jiffies(delay));
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004607 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4608}
4609
/* Create all data-path queues (EQs, TXQs, RX CQs, MCCQ) and publish the
 * actual RX/TX queue counts to the network stack. On the first failure,
 * logs an error and returns that status; the caller handles cleanup.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4644
Ajit Khaparde62219062016-02-10 22:45:53 +05304645static int be_if_create(struct be_adapter *adapter)
4646{
4647 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4648 u32 cap_flags = be_if_cap_flags(adapter);
4649 int status;
4650
Sathya Perlab7172412016-07-27 05:26:18 -04004651 /* alloc required memory for other filtering fields */
4652 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4653 sizeof(*adapter->pmac_id), GFP_KERNEL);
4654 if (!adapter->pmac_id)
4655 return -ENOMEM;
4656
4657 adapter->mc_list = kcalloc(be_max_mc(adapter),
4658 sizeof(*adapter->mc_list), GFP_KERNEL);
4659 if (!adapter->mc_list)
4660 return -ENOMEM;
4661
4662 adapter->uc_list = kcalloc(be_max_uc(adapter),
4663 sizeof(*adapter->uc_list), GFP_KERNEL);
4664 if (!adapter->uc_list)
4665 return -ENOMEM;
4666
Sathya Perlae2617682016-06-22 08:54:54 -04004667 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304668 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4669
4670 en_flags &= cap_flags;
4671 /* will enable all the needed filter flags in be_open() */
4672 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4673 &adapter->if_handle, 0);
4674
Sathya Perlab7172412016-07-27 05:26:18 -04004675 if (status)
4676 return status;
4677
4678 return 0;
Ajit Khaparde62219062016-02-10 22:45:53 +05304679}
4680
/* Re-create the default iface and all data-path queues, e.g. after a
 * queue-count change: close the netdev if it is running, tear down the
 * queues (re-programming MSI-X only when no vectors are shared with
 * RoCE), recreate the iface and queues, then reopen the netdev.
 * Returns 0 on success or the status of the first failing step.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4723
Sathya Perlaf7062ee2015-02-06 08:18:35 -05004724static inline int fw_major_num(const char *fw_ver)
4725{
4726 int fw_major = 0, i;
4727
4728 i = sscanf(fw_ver, "%d.", &fw_major);
4729 if (i != 1)
4730 return 0;
4731
4732 return fw_major;
4733}
4734
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304735/* If it is error recovery, FLR the PF
4736 * Else if any VFs are already enabled don't FLR the PF
4737 */
Sathya Perlaf962f842015-02-23 04:20:16 -05004738static bool be_reset_required(struct be_adapter *adapter)
4739{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304740 if (be_error_recovering(adapter))
4741 return true;
4742 else
4743 return pci_num_vf(adapter->pdev) == 0;
Sathya Perlaf962f842015-02-23 04:20:16 -05004744}
4745
/* Wait for the FW to be ready and perform the required initialization.
 * Must run before any mailbox cmds are issued to the adapter.
 * Returns 0 on success or a -ve error code.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	/* Block until the FW reports it is ready */
	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FW is now ready; clear errors to allow cmds/doorbell */
	be_clear_error(adapter, BE_CLEAR_ALL);

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4777
Sathya Perla5fb379e2009-06-18 00:02:59 +00004778static int be_setup(struct be_adapter *adapter)
4779{
Sathya Perla39f1d942012-05-08 19:41:24 +00004780 struct device *dev = &adapter->pdev->dev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004781 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004782
Sathya Perlaf962f842015-02-23 04:20:16 -05004783 status = be_func_init(adapter);
4784 if (status)
4785 return status;
4786
Sathya Perla30128032011-11-10 19:17:57 +00004787 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004788
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004789 if (!lancer_chip(adapter))
4790 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00004791
Suresh Reddy980df242015-12-30 01:29:03 -05004792 /* invoke this cmd first to get pf_num and vf_num which are needed
4793 * for issuing profile related cmds
4794 */
4795 if (!BEx_chip(adapter)) {
4796 status = be_cmd_get_func_config(adapter, NULL);
4797 if (status)
4798 return status;
4799 }
Somnath Kotur72ef3a82015-10-12 03:47:20 -04004800
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004801 status = be_get_config(adapter);
4802 if (status)
4803 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00004804
Somnath Koturde2b1e02016-06-06 07:22:10 -04004805 if (!BE2_chip(adapter) && be_physfn(adapter))
4806 be_alloc_sriov_res(adapter);
4807
4808 status = be_get_resources(adapter);
4809 if (status)
4810 goto err;
4811
Somnath Koturc2bba3d2013-05-02 03:37:08 +00004812 status = be_msix_enable(adapter);
4813 if (status)
4814 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004815
Kalesh APbcc84142015-08-05 03:27:48 -04004816 /* will enable all the needed filter flags in be_open() */
Ajit Khaparde62219062016-02-10 22:45:53 +05304817 status = be_if_create(adapter);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004818 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004819 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004820
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304821 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4822 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05304823 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304824 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004825 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00004826 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004827
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004828 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004829
Sathya Perla95046b92013-07-23 15:25:02 +05304830 status = be_mac_setup(adapter);
4831 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00004832 goto err;
4833
Kalesh APe97e3cd2014-07-17 16:20:26 +05304834 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304835 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00004836
Somnath Koture9e2a902013-10-24 14:37:53 +05304837 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05304838 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05304839 adapter->fw_ver);
4840 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4841 }
4842
Kalesh AP00d594c2015-01-20 03:51:44 -05004843 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4844 adapter->rx_fc);
4845 if (status)
4846 be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4847 &adapter->rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00004848
Kalesh AP00d594c2015-01-20 03:51:44 -05004849 dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4850 adapter->tx_fc, adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00004851
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304852 if (be_physfn(adapter))
4853 be_cmd_set_logical_link_config(adapter,
4854 IFLA_VF_LINK_STATE_AUTO, 0);
4855
Somnath Kotur884476b2016-06-22 08:54:55 -04004856 /* BE3 EVB echoes broadcast/multicast packets back to PF's vport
4857 * confusing a linux bridge or OVS that it might be connected to.
4858 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4859 * when SRIOV is not enabled.
4860 */
4861 if (BE3_chip(adapter))
4862 be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4863 PORT_FWD_TYPE_PASSTHRU, 0);
4864
Vasundhara Volambec84e62014-06-30 13:01:32 +05304865 if (adapter->num_vfs)
4866 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004867
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004868 status = be_cmd_get_phy_info(adapter);
4869 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004870 adapter->phy.fc_autoneg = 1;
4871
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05304872 if (be_physfn(adapter) && !lancer_chip(adapter))
4873 be_cmd_set_features(adapter);
4874
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304875 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05304876 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00004877 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00004878err:
4879 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004880 return status;
4881}
4882
Ivan Vecera66268732011-12-08 01:31:21 +00004883#ifdef CONFIG_NET_POLL_CONTROLLER
4884static void be_netpoll(struct net_device *netdev)
4885{
4886 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004887 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00004888 int i;
4889
Sathya Perlae49cc342012-11-27 19:50:02 +00004890 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04004891 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perlae49cc342012-11-27 19:50:02 +00004892 napi_schedule(&eqo->napi);
4893 }
Ivan Vecera66268732011-12-08 01:31:21 +00004894}
4895#endif
4896
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004897int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4898{
4899 const struct firmware *fw;
4900 int status;
4901
4902 if (!netif_running(adapter->netdev)) {
4903 dev_err(&adapter->pdev->dev,
4904 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304905 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004906 }
4907
4908 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4909 if (status)
4910 goto fw_exit;
4911
4912 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4913
4914 if (lancer_chip(adapter))
4915 status = lancer_fw_download(adapter, fw);
4916 else
4917 status = be_fw_download(adapter, fw);
4918
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004919 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304920 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004921
Ajit Khaparde84517482009-09-04 03:12:16 +00004922fw_exit:
4923 release_firmware(fw);
4924 return status;
4925}
4926
Roopa Prabhuadd511b2015-01-29 22:40:12 -08004927static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4928 u16 flags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004929{
4930 struct be_adapter *adapter = netdev_priv(dev);
4931 struct nlattr *attr, *br_spec;
4932 int rem;
4933 int status = 0;
4934 u16 mode = 0;
4935
4936 if (!sriov_enabled(adapter))
4937 return -EOPNOTSUPP;
4938
4939 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004940 if (!br_spec)
4941 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004942
4943 nla_for_each_nested(attr, br_spec, rem) {
4944 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4945 continue;
4946
Thomas Grafb7c1a312014-11-26 13:42:17 +01004947 if (nla_len(attr) < sizeof(mode))
4948 return -EINVAL;
4949
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004950 mode = nla_get_u16(attr);
Suresh Reddyac0f5fb2015-12-30 01:28:57 -05004951 if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
4952 return -EOPNOTSUPP;
4953
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004954 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4955 return -EINVAL;
4956
4957 status = be_cmd_set_hsw_config(adapter, 0, 0,
4958 adapter->if_handle,
4959 mode == BRIDGE_MODE_VEPA ?
4960 PORT_FWD_TYPE_VEPA :
Kalesh APe7bcbd72015-05-06 05:30:32 -04004961 PORT_FWD_TYPE_VEB, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004962 if (status)
4963 goto err;
4964
4965 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4966 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4967
4968 return status;
4969 }
4970err:
4971 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4972 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4973
4974 return status;
4975}
4976
/* ndo_bridge_getlink implementation: report the current e-switch port
 * forwarding mode (VEB/VEPA) via the default bridge-getlink helper.
 * Returns 0 (also on chips/configs where nothing is reported) or the
 * value of ndo_dflt_bridge_getlink().
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: query the FW for the configured mode */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
5007
Sathya Perlab7172412016-07-27 05:26:18 -04005008static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
5009 void (*func)(struct work_struct *))
5010{
5011 struct be_cmd_work *work;
5012
5013 work = kzalloc(sizeof(*work), GFP_ATOMIC);
5014 if (!work) {
5015 dev_err(&adapter->pdev->dev,
5016 "be_work memory allocation failed\n");
5017 return NULL;
5018 }
5019
5020 INIT_WORK(&work->work, func);
5021 work->adapter = adapter;
5022 return work;
5023}
5024
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005025/* VxLAN offload Notes:
5026 *
5027 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5028 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5029 * is expected to work across all types of IP tunnels once exported. Skyhawk
5030 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305031 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5032 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5033 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005034 *
5035 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5036 * adds more than one port, disable offloads and don't re-enable them again
5037 * until after all the tunnels are removed.
5038 */
Sathya Perlab7172412016-07-27 05:26:18 -04005039static void be_work_add_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05305040{
Sathya Perlab7172412016-07-27 05:26:18 -04005041 struct be_cmd_work *cmd_work =
5042 container_of(work, struct be_cmd_work, work);
5043 struct be_adapter *adapter = cmd_work->adapter;
5044 struct net_device *netdev = adapter->netdev;
Sathya Perlac9c47142014-03-27 10:46:19 +05305045 struct device *dev = &adapter->pdev->dev;
Sathya Perlab7172412016-07-27 05:26:18 -04005046 __be16 port = cmd_work->info.vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05305047 int status;
5048
Jiri Benc1e5b3112015-09-17 16:11:13 +02005049 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
5050 adapter->vxlan_port_aliases++;
Sathya Perlab7172412016-07-27 05:26:18 -04005051 goto done;
Jiri Benc1e5b3112015-09-17 16:11:13 +02005052 }
5053
Sathya Perlac9c47142014-03-27 10:46:19 +05305054 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05305055 dev_info(dev,
5056 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005057 dev_info(dev, "Disabling VxLAN offloads\n");
5058 adapter->vxlan_port_count++;
5059 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05305060 }
5061
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005062 if (adapter->vxlan_port_count++ >= 1)
Sathya Perlab7172412016-07-27 05:26:18 -04005063 goto done;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005064
Sathya Perlac9c47142014-03-27 10:46:19 +05305065 status = be_cmd_manage_iface(adapter, adapter->if_handle,
5066 OP_CONVERT_NORMAL_TO_TUNNEL);
5067 if (status) {
5068 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
5069 goto err;
5070 }
5071
5072 status = be_cmd_set_vxlan_port(adapter, port);
5073 if (status) {
5074 dev_warn(dev, "Failed to add VxLAN port\n");
5075 goto err;
5076 }
5077 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
5078 adapter->vxlan_port = port;
5079
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005080 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5081 NETIF_F_TSO | NETIF_F_TSO6 |
5082 NETIF_F_GSO_UDP_TUNNEL;
5083 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05305084 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005085
Sathya Perlac9c47142014-03-27 10:46:19 +05305086 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
5087 be16_to_cpu(port));
Sathya Perlab7172412016-07-27 05:26:18 -04005088 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305089err:
5090 be_disable_vxlan_offloads(adapter);
Sathya Perlab7172412016-07-27 05:26:18 -04005091done:
5092 kfree(cmd_work);
Sathya Perlac9c47142014-03-27 10:46:19 +05305093}
5094
Sathya Perlab7172412016-07-27 05:26:18 -04005095static void be_work_del_vxlan_port(struct work_struct *work)
Sathya Perlac9c47142014-03-27 10:46:19 +05305096{
Sathya Perlab7172412016-07-27 05:26:18 -04005097 struct be_cmd_work *cmd_work =
5098 container_of(work, struct be_cmd_work, work);
5099 struct be_adapter *adapter = cmd_work->adapter;
5100 __be16 port = cmd_work->info.vxlan_port;
Sathya Perlac9c47142014-03-27 10:46:19 +05305101
5102 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005103 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305104
Jiri Benc1e5b3112015-09-17 16:11:13 +02005105 if (adapter->vxlan_port_aliases) {
5106 adapter->vxlan_port_aliases--;
Sathya Perlab7172412016-07-27 05:26:18 -04005107 goto out;
Jiri Benc1e5b3112015-09-17 16:11:13 +02005108 }
5109
Sathya Perlac9c47142014-03-27 10:46:19 +05305110 be_disable_vxlan_offloads(adapter);
5111
5112 dev_info(&adapter->pdev->dev,
5113 "Disabled VxLAN offloads for UDP port %d\n",
5114 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005115done:
5116 adapter->vxlan_port_count--;
Sathya Perlab7172412016-07-27 05:26:18 -04005117out:
5118 kfree(cmd_work);
5119}
5120
5121static void be_cfg_vxlan_port(struct net_device *netdev,
5122 struct udp_tunnel_info *ti,
5123 void (*func)(struct work_struct *))
5124{
5125 struct be_adapter *adapter = netdev_priv(netdev);
5126 struct be_cmd_work *cmd_work;
5127
5128 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
5129 return;
5130
5131 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
5132 return;
5133
5134 cmd_work = be_alloc_work(adapter, func);
5135 if (cmd_work) {
5136 cmd_work->info.vxlan_port = ti->port;
5137 queue_work(be_wq, &cmd_work->work);
5138 }
5139}
5140
/* ndo_udp_tunnel_del: queue deferred removal of a VxLAN port */
static void be_del_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_del_vxlan_port);
}
5146
/* ndo_udp_tunnel_add: queue deferred addition of a VxLAN port */
static void be_add_vxlan_port(struct net_device *netdev,
			      struct udp_tunnel_info *ti)
{
	be_cfg_vxlan_port(netdev, ti, be_work_add_vxlan_port);
}
Joe Stringer725d5482014-11-13 16:38:13 -08005152
Jesse Gross5f352272014-12-23 22:37:26 -08005153static netdev_features_t be_features_check(struct sk_buff *skb,
5154 struct net_device *dev,
5155 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005156{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305157 struct be_adapter *adapter = netdev_priv(dev);
5158 u8 l4_hdr = 0;
5159
5160 /* The code below restricts offload features for some tunneled packets.
5161 * Offload features for normal (non tunnel) packets are unchanged.
5162 */
5163 if (!skb->encapsulation ||
5164 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5165 return features;
5166
5167 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5168 * should disable tunnel offload features if it's not a VxLAN packet,
5169 * as tunnel offloads have been enabled only for VxLAN. This is done to
5170 * allow other tunneled traffic like GRE work fine while VxLAN
5171 * offloads are configured in Skyhawk-R.
5172 */
5173 switch (vlan_get_protocol(skb)) {
5174 case htons(ETH_P_IP):
5175 l4_hdr = ip_hdr(skb)->protocol;
5176 break;
5177 case htons(ETH_P_IPV6):
5178 l4_hdr = ipv6_hdr(skb)->nexthdr;
5179 break;
5180 default:
5181 return features;
5182 }
5183
5184 if (l4_hdr != IPPROTO_UDP ||
5185 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5186 skb->inner_protocol != htons(ETH_P_TEB) ||
5187 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
Sabrina Dubroca096de2f2017-01-03 16:26:04 +01005188 sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
5189 !adapter->vxlan_port ||
5190 udp_hdr(skb)->dest != adapter->vxlan_port)
Tom Herberta1882222015-12-14 11:19:43 -08005191 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305192
5193 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005194}
Sathya Perlac9c47142014-03-27 10:46:19 +05305195
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305196static int be_get_phys_port_id(struct net_device *dev,
5197 struct netdev_phys_item_id *ppid)
5198{
5199 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5200 struct be_adapter *adapter = netdev_priv(dev);
5201 u8 *id;
5202
5203 if (MAX_PHYS_ITEM_ID_LEN < id_len)
5204 return -ENOSPC;
5205
5206 ppid->id[0] = adapter->hba_port_num + 1;
5207 id = &ppid->id[1];
5208 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5209 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5210 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5211
5212 ppid->id_len = id_len;
5213
5214 return 0;
5215}
5216
Sathya Perlab7172412016-07-27 05:26:18 -04005217static void be_set_rx_mode(struct net_device *dev)
5218{
5219 struct be_adapter *adapter = netdev_priv(dev);
5220 struct be_cmd_work *work;
5221
5222 work = be_alloc_work(adapter, be_work_set_rx_mode);
5223 if (work)
5224 queue_work(be_wq, &work->work);
5225}
5226
stephen hemmingere5686ad2012-01-05 19:10:25 +00005227static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005228 .ndo_open = be_open,
5229 .ndo_stop = be_close,
5230 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00005231 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005232 .ndo_set_mac_address = be_mac_addr_set,
Sathya Perlaab1594e2011-07-25 19:10:15 +00005233 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005234 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005235 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
5236 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00005237 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00005238 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04005239 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00005240 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05305241 .ndo_set_vf_link_state = be_set_vf_link_state,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005242 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
Ivan Vecera66268732011-12-08 01:31:21 +00005243#ifdef CONFIG_NET_POLL_CONTROLLER
5244 .ndo_poll_controller = be_netpoll,
5245#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005246 .ndo_bridge_setlink = be_ndo_bridge_setlink,
5247 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305248#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05305249 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305250#endif
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07005251 .ndo_udp_tunnel_add = be_add_vxlan_port,
5252 .ndo_udp_tunnel_del = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005253 .ndo_features_check = be_features_check,
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305254 .ndo_get_phys_port_id = be_get_phys_port_id,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005255};
5256
/* One-time netdev initialization: advertise offload features, set
 * device flags, GSO limit, MTU range, and hook up the ops/ethtool
 * vectors.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RXHASH only when the IF supports RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;

	/* MTU range: 256 - 9000 */
	netdev->min_mtu = BE_MIN_MTU;
	netdev->max_mtu = BE_MAX_MTU;
}
5287
Kalesh AP87ac1a52015-02-23 04:20:15 -05005288static void be_cleanup(struct be_adapter *adapter)
5289{
5290 struct net_device *netdev = adapter->netdev;
5291
5292 rtnl_lock();
5293 netif_device_detach(netdev);
5294 if (netif_running(netdev))
5295 be_close(netdev);
5296 rtnl_unlock();
5297
5298 be_clear(adapter);
5299}
5300
Kalesh AP484d76f2015-02-23 04:20:14 -05005301static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005302{
Kalesh APd0e1b312015-02-23 04:20:12 -05005303 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005304 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005305
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005306 status = be_setup(adapter);
5307 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005308 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005309
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005310 rtnl_lock();
5311 if (netif_running(netdev))
Kalesh APd0e1b312015-02-23 04:20:12 -05005312 status = be_open(netdev);
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005313 rtnl_unlock();
5314
5315 if (status)
5316 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005317
Kalesh APd0e1b312015-02-23 04:20:12 -05005318 netif_device_attach(netdev);
5319
Kalesh AP484d76f2015-02-23 04:20:14 -05005320 return 0;
5321}
5322
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305323static void be_soft_reset(struct be_adapter *adapter)
5324{
5325 u32 val;
5326
5327 dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5328 val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5329 val |= SLIPORT_SOFTRESET_SR_MASK;
5330 iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5331}
5332
/* Check whether the current HW error meets the recovery criteria:
 * POST stage reports a recoverable error with a non-zero error code,
 * enough time has passed since driver load and since the last
 * recovery, and the error code differs from the previous one (a
 * repeated TPE error is treated as unrecoverable).  On success,
 * records the recovery time and error code for the next check.
 */
static bool be_err_is_recoverable(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	unsigned long initial_idle_time =
		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
	unsigned long recovery_interval =
		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
	u16 ue_err_code;
	u32 val;

	val = be_POST_stage_get(adapter);
	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
		return false;
	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
	if (ue_err_code == 0)
		return false;

	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
		ue_err_code);

	/* Too soon after probe: do not attempt recovery */
	if (jiffies - err_rec->probe_time <= initial_idle_time) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from driver load\n",
			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
		return false;
	}

	/* Rate-limit: enforce a minimum interval between recoveries */
	if (err_rec->last_recovery_time &&
	    (jiffies - err_rec->last_recovery_time <= recovery_interval)) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover within %lu sec from last recovery\n",
			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
		return false;
	}

	if (ue_err_code == err_rec->last_err_code) {
		dev_err(&adapter->pdev->dev,
			"Cannot recover from a consecutive TPE error\n");
		return false;
	}

	err_rec->last_recovery_time = jiffies;
	err_rec->last_err_code = ue_err_code;
	return true;
}
5378
/* One step of the TPE (recoverable HW error) recovery state machine
 * for BEx/Skyhawk.  Each call advances recovery_state and sets
 * resched_delay for the caller to re-invoke after that delay.
 * Returns -EAGAIN while recovery is in progress, 0 when the chip is
 * ready for re-initialization, or a -ve error when recovery is
 * impossible (resched_delay is then 0).
 */
static int be_tpe_recover(struct be_adapter *adapter)
{
	struct be_error_recovery *err_rec = &adapter->error_recovery;
	int status = -EAGAIN;
	u32 val;

	switch (err_rec->recovery_state) {
	case ERR_RECOVERY_ST_NONE:
		/* Start: wait the UE detection duration before probing */
		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_DETECT:
		val = be_POST_stage_get(adapter);
		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
		    POST_STAGE_RECOVERABLE_ERR) {
			dev_err(&adapter->pdev->dev,
				"Unrecoverable HW error detected: 0x%x\n", val);
			status = -EINVAL;
			err_rec->resched_delay = 0;
			break;
		}

		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");

		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
		 * milliseconds before it checks for final error status in
		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
		 * If it does, then PF0 initiates a Soft Reset.
		 */
		if (adapter->pf_num == 0) {
			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
			err_rec->resched_delay = err_rec->ue_to_reset_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
			break;
		}

		/* Non-PF0 functions just wait out the poll window */
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					ERR_RECOVERY_UE_DETECT_DURATION;
		break;

	case ERR_RECOVERY_ST_RESET:
		/* PF0 only: verify criteria, then issue the soft reset */
		if (!be_err_is_recoverable(adapter)) {
			dev_err(&adapter->pdev->dev,
				"Failed to meet recovery criteria\n");
			status = -EIO;
			err_rec->resched_delay = 0;
			break;
		}
		be_soft_reset(adapter);
		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
		err_rec->resched_delay = err_rec->ue_to_poll_time -
					err_rec->ue_to_reset_time;
		break;

	case ERR_RECOVERY_ST_PRE_POLL:
		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
		err_rec->resched_delay = 0;
		status = 0;		/* done */
		break;

	default:
		status = -EINVAL;
		err_rec->resched_delay = 0;
		break;
	}

	return status;
}
5449
Kalesh AP484d76f2015-02-23 04:20:14 -05005450static int be_err_recover(struct be_adapter *adapter)
5451{
Kalesh AP484d76f2015-02-23 04:20:14 -05005452 int status;
5453
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305454 if (!lancer_chip(adapter)) {
5455 if (!adapter->error_recovery.recovery_supported ||
5456 adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
5457 return -EIO;
5458 status = be_tpe_recover(adapter);
5459 if (status)
5460 goto err;
5461 }
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305462
5463 /* Wait for adapter to reach quiescent state before
5464 * destroying queues
5465 */
5466 status = be_fw_wait_ready(adapter);
5467 if (status)
5468 goto err;
5469
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305470 adapter->flags |= BE_FLAGS_TRY_RECOVERY;
5471
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305472 be_cleanup(adapter);
5473
Kalesh AP484d76f2015-02-23 04:20:14 -05005474 status = be_resume(adapter);
5475 if (status)
5476 goto err;
5477
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05305478 adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
5479
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005480err:
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005481 return status;
5482}
5483
/* Periodic error-detection work item.
 *
 * Polls the adapter for HW errors and, when one is found, drives
 * be_err_recover().  Depending on the outcome and chip type the work is
 * rescheduled with different delays:
 *  - success: reset recovery state, poll again at the normal interval
 *  - BEx/SH with a pending state-machine step: use the delay the state
 *    machine computed (err_rec->resched_delay)
 *  - Lancer VF: retry every interval while waiting for PF resources
 *  - Lancer PF: bounded retries (ERR_RECOVERY_MAX_RETRY_COUNT) with a
 *    longer delay; after that, give up and ask for a reboot (the work
 *    is NOT rescheduled in that case).
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_error_recovery *err_rec =
			container_of(work, struct be_error_recovery,
				     err_detection_work.work);
	struct be_adapter *adapter =
			container_of(err_rec, struct be_adapter,
				     error_recovery);
	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
	struct device *dev = &adapter->pdev->dev;
	int recovery_status;

	be_detect_error(adapter);
	/* no HW error: just keep polling */
	if (!be_check_error(adapter, BE_ERROR_HW))
		goto reschedule_task;

	recovery_status = be_err_recover(adapter);
	if (!recovery_status) {
		err_rec->recovery_retries = 0;
		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
		dev_info(dev, "Adapter recovery successful\n");
		goto reschedule_task;
	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
		/* BEx/SH recovery state machine */
		if (adapter->pf_num == 0 &&
		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
			dev_err(&adapter->pdev->dev,
				"Adapter recovery in progress\n");
		resched_delay = err_rec->resched_delay;
		goto reschedule_task;
	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
		/* For VFs, check if PF have allocated resources
		 * every second.
		 */
		dev_err(dev, "Re-trying adapter recovery\n");
		goto reschedule_task;
	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
		   ERR_RECOVERY_MAX_RETRY_COUNT) {
		/* In case of another error during recovery, it takes 30 sec
		 * for adapter to come out of error. Retry error recovery after
		 * this time interval.
		 */
		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
		resched_delay = ERR_RECOVERY_RETRY_DELAY;
		goto reschedule_task;
	} else {
		dev_err(dev, "Adapter recovery failed\n");
		dev_err(dev, "Please reboot server to recover\n");
	}

	return;

reschedule_task:
	be_schedule_err_detection(adapter, resched_delay);
}
5539
Vasundhara Volam21252372015-02-06 08:18:42 -05005540static void be_log_sfp_info(struct be_adapter *adapter)
5541{
5542 int status;
5543
5544 status = be_cmd_query_sfp_info(adapter);
5545 if (!status) {
5546 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305547 "Port %c: %s Vendor: %s part no: %s",
5548 adapter->port_name,
5549 be_misconfig_evt_port_state[adapter->phy_state],
5550 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005551 adapter->phy.vendor_pn);
5552 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305553 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005554}
5555
/* Periodic housekeeping work item (runs every second on be_wq).
 *
 * Duties: periodic die-temperature query (PF only, every
 * be_get_temp_freq ticks), reaping MCC completions while the netdev is
 * down, firing the async stats command, replenishing starved RX queues
 * and updating EQ delays (non-Skyhawk), and logging SFP misconfiguration
 * events.  Always requeues itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BHs disabled */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* only one stats command in flight at a time */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
}
5604
/* Undo be_map_pci_bars(): release every BAR that was iomapped.
 * pcicfg is unmapped only when it came from pci_iomap()
 * (pcicfg_mapped == true); for VFs it is just an offset into the
 * doorbell mapping and must not be unmapped separately.
 */
static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		pci_iounmap(adapter->pdev, adapter->csr);
	if (adapter->db)
		pci_iounmap(adapter->pdev, adapter->db);
	if (adapter->pcicfg && adapter->pcicfg_mapped)
		pci_iounmap(adapter->pdev, adapter->pcicfg);
}
5614
/* Return the PCI BAR number that holds the doorbell registers:
 * BAR 0 for Lancer chips and for virtual functions, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5622
5623static int be_roce_map_pci_bars(struct be_adapter *adapter)
5624{
5625 if (skyhawk_chip(adapter)) {
5626 adapter->roce_db.size = 4096;
5627 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5628 db_bar(adapter));
5629 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5630 db_bar(adapter));
5631 }
5632 return 0;
5633}
5634
/* Map the PCI BARs needed by the driver and cache chip identity bits.
 *
 * Reads the SLI interface register to latch the SLI family and the
 * VF/PF indication, then maps: the CSR BAR (BEx PF only), the doorbell
 * BAR (all chips) and the PCICFG window (Skyhawk/BEx; mapped for PFs,
 * derived from the doorbell mapping for VFs).
 *
 * Returns 0 on success or -ENOMEM; on failure all partial mappings are
 * released via be_unmap_pci_bars().
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			/* remember that this one needs pci_iounmap() */
			adapter->pcicfg_mapped = true;
		} else {
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5679
5680static void be_drv_cleanup(struct be_adapter *adapter)
5681{
5682 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5683 struct device *dev = &adapter->pdev->dev;
5684
5685 if (mem->va)
5686 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5687
5688 mem = &adapter->rx_filter;
5689 if (mem->va)
5690 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5691
5692 mem = &adapter->stats_cmd;
5693 if (mem->va)
5694 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5695}
5696
/* Allocate and initialize various fields in be_adapter struct */
/* Allocates the coherent DMA buffers used for FW communication
 * (mailbox, RX-filter command, stats command), initializes the locks,
 * completion and delayed work items, and seeds default flow-control and
 * temperature-poll settings.
 *
 * Returns 0 on success or -ENOMEM; earlier allocations are freed on the
 * goto-unwind error path.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* aligned view into the buffer allocated above; freed via
	 * mbox_mem_alloc, never directly
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* stats request size depends on chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	mutex_init(&adapter->mcc_lock);
	mutex_init(&adapter->rx_filter_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
	adapter->error_recovery.resched_delay = 0;
	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5771
/* PCI remove callback: tear down everything be_probe() set up, in
 * reverse order.  The function reset is skipped while VFs are still
 * assigned to guests.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is only set once probe succeeded far enough */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* resetting the function would break VFs still owned by guests */
	if (!pci_vfs_assigned(adapter->pdev))
		be_cmd_reset_function(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5804
Arnd Bergmann9a032592015-05-18 23:06:45 +02005805static ssize_t be_hwmon_show_temp(struct device *dev,
5806 struct device_attribute *dev_attr,
5807 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305808{
5809 struct be_adapter *adapter = dev_get_drvdata(dev);
5810
5811 /* Unit: millidegree Celsius */
5812 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5813 return -EIO;
5814 else
5815 return sprintf(buf, "%u\n",
5816 adapter->hwmon_info.be_on_die_temp * 1000);
5817}
5818
/* hwmon sysfs plumbing: a single read-only "temp1_input" attribute
 * served by be_hwmon_show_temp().  The trailing index (1) is not read
 * by the show routine.  ATTRIBUTE_GROUPS() generates be_hwmon_groups
 * used at device registration in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5828
Sathya Perlad3791422012-09-28 04:39:44 +00005829static char *mc_name(struct be_adapter *adapter)
5830{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305831 char *str = ""; /* default */
5832
5833 switch (adapter->mc_type) {
5834 case UMC:
5835 str = "UMC";
5836 break;
5837 case FLEX10:
5838 str = "FLEX10";
5839 break;
5840 case vNIC1:
5841 str = "vNIC-1";
5842 break;
5843 case nPAR:
5844 str = "nPAR";
5845 break;
5846 case UFP:
5847 str = "UFP";
5848 break;
5849 case vNIC2:
5850 str = "vNIC-2";
5851 break;
5852 default:
5853 str = "";
5854 }
5855
5856 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005857}
5858
/* Return "PF" or "VF" depending on whether this function is the
 * physical function.
 */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5863
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005864static inline char *nic_name(struct pci_dev *pdev)
5865{
5866 switch (pdev->device) {
5867 case OC_DEVICE_ID1:
5868 return OC_NAME;
5869 case OC_DEVICE_ID2:
5870 return OC_NAME_BE;
5871 case OC_DEVICE_ID3:
5872 case OC_DEVICE_ID4:
5873 return OC_NAME_LANCER;
5874 case BE_DEVICE_ID2:
5875 return BE3_NAME;
5876 case OC_DEVICE_ID5:
5877 case OC_DEVICE_ID6:
5878 return OC_NAME_SH;
5879 default:
5880 return BE_NAME;
5881 }
5882}
5883
/* PCI probe callback: bring up one adapter.
 *
 * Sequence: enable the PCI device, claim its regions, allocate the
 * netdev (multi-queue), configure DMA masks (64-bit preferred, 32-bit
 * fallback), enable AER, map BARs, allocate driver resources, run the
 * FW setup, register the netdev, attach the RoCE companion driver,
 * start the error-detection poller and (PF only, if configured)
 * register the hwmon temperature sensor.
 *
 * Returns 0 on success or a negative errno; the goto ladder unwinds in
 * strict reverse order of the steps already completed.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is optional; failure to enable it is not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	adapter->error_recovery.probe_time = jiffies;

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5980
/* Legacy PCI power-management suspend hook: mask interrupts, stop the
 * error-detection poller, quiesce the adapter, then save PCI state and
 * drop the device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5995
/* Legacy PCI power-management resume hook: re-enable the device,
 * restore its saved config space, re-initialize the adapter and restart
 * the error-detection poller.  Returns 0 or a negative errno.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
6015
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stop background work, detach the netdev and
 * reset the function so the hardware stops all DMA before the system
 * powers off or kexecs.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata is only set once probe succeeded far enough */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
6036
/* EEH/AER error_detected callback.
 *
 * Quiesces the adapter on the first notification (the BE_ERROR_EEH flag
 * guards against repeated cleanup), then tells the PCI core whether to
 * disconnect (permanent failure) or proceed to a slot reset.  Function 0
 * additionally waits 30s in case FW is writing a flash debug dump.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* only clean up once per EEH episode */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
6070
/* EEH/AER slot_reset callback: re-enable the device after the slot
 * reset, wait for FW readiness, clear AER status and the driver's error
 * flags.  Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
6096
/* EEH/AER resume callback: re-initialize the adapter after a successful
 * slot reset, re-attach the RoCE companion driver and restart the
 * error-detection poller.  Errors here leave the adapter down.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
6117
/* sysfs sriov_numvfs handler: enable num_vfs VFs (or disable all when
 * num_vfs == 0), redistributing PF-pool resources on Skyhawk first.
 *
 * Returns the number of VFs enabled, 0 on a successful disable, or a
 * negative errno (-EBUSY while VFs are still assigned to guests).
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct be_resources vft_res = {0};
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		be_calculate_vf_res(adapter, adapter->num_vfs,
				    &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, &vft_res);
		/* non-fatal: continue with unoptimized resources */
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6172
/* PCI error-recovery (EEH/AER) callback table registered via
 * be_driver.err_handler
 */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6178
/* PCI driver definition: probe/remove, legacy suspend/resume PM hooks,
 * shutdown, sysfs-driven SR-IOV configuration and EEH error handlers.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
6190
6191static int __init be_init_module(void)
6192{
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306193 int status;
6194
Joe Perches8e95a202009-12-03 07:58:21 +00006195 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6196 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006197 printk(KERN_WARNING DRV_NAME
6198 " : Module param rx_frag_size must be 2048/4096/8192."
6199 " Using 2048\n");
6200 rx_frag_size = 2048;
6201 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006202
Vasundhara Volamace40af2015-03-04 00:44:34 -05006203 if (num_vfs > 0) {
6204 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6205 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6206 }
6207
Sathya Perlab7172412016-07-27 05:26:18 -04006208 be_wq = create_singlethread_workqueue("be_wq");
6209 if (!be_wq) {
6210 pr_warn(DRV_NAME "workqueue creation failed\n");
6211 return -1;
6212 }
6213
Sriharsha Basavapatna710f3e52016-09-07 19:57:49 +05306214 be_err_recovery_workq =
6215 create_singlethread_workqueue("be_err_recover");
6216 if (!be_err_recovery_workq)
6217 pr_warn(DRV_NAME "Could not create error recovery workqueue\n");
6218
6219 status = pci_register_driver(&be_driver);
6220 if (status) {
6221 destroy_workqueue(be_wq);
6222 be_destroy_err_recovery_workq();
6223 }
6224 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006225}
6226module_init(be_init_module);
6227
/* Module exit: unregister the PCI driver (which removes all adapters),
 * then tear down the error-recovery and main workqueues.  be_wq is
 * NULL-checked because module load cannot complete without it, but the
 * check keeps the teardown defensive.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);

	be_destroy_err_recovery_workq();

	if (be_wq)
		destroy_workqueue(be_wq);
}
module_exit(be_exit_module);