blob: 03f6f6db2d3cf13ba115ef1bcb8fd8ec683d785c [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Somnath Kotur7dfbe7d2016-06-22 08:54:56 -04002 * Copyright (C) 2005 - 2016 Broadcom
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
/* Module identity reported via modinfo. */
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);	/* read-only in sysfs; load-time only */
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the hardware. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);	/* read-only in sysfs */
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI device IDs this driver binds to (BE2/BE3 and OneConnect variants). */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for hardware blocks, one per status bit.
 * NOTE(review): presumably indexed by bit position of the UE-low CSR —
 * verify against the error-reporting code that walks this table.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
/* Companion table to ue_status_low_desc for the high CSR's status bits.
 * NOTE(review): the trailing "Unknown" entry looks like a catch-all for
 * out-of-range bits — confirm against the lookup code.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
/* Interface capability flags enabled when creating a VF's interface. */
#define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
				 BE_IF_FLAGS_BROADCAST | \
				 BE_IF_FLAGS_MULTICAST | \
				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
132
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
134{
135 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530136
Sathya Perla1cfafab2012-02-23 18:50:15 +0000137 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000138 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
139 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000140 mem->va = NULL;
141 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142}
143
144static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530145 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146{
147 struct be_dma_mem *mem = &q->dma_mem;
148
149 memset(q, 0, sizeof(*q));
150 q->len = len;
151 q->entry_size = entry_size;
152 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700153 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
154 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000156 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157 return 0;
158}
159
/* Enable/disable host interrupts by toggling the HOSTINTR bit in the
 * PCI-config-space membar control register. Read-modify-write; the write
 * is skipped entirely when the bit already matches the requested state.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
178
/* Enable/disable host interrupts, preferring the firmware command and
 * falling back to the PCI-config register when the command fails.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	/* Don't touch the device while an EEH error is being handled */
	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);	/* fallback path */
}
194
/* Ring the RX queue doorbell: tell HW that @posted new buffers are
 * available on RX queue @qid.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	/* Skip MMIO if the HW is in an error state */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* Ensure descriptor writes are visible before the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
208
/* Ring the TX queue doorbell: tell HW that @posted new WRBs were placed
 * on TX object @txo's queue.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	/* Skip MMIO if the HW is in an error state */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* Ensure descriptor writes are visible before the doorbell */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
223
/* Ring the event-queue doorbell for EQ @qid: acknowledge @num_popped
 * consumed events, optionally re-arm the EQ and clear the interrupt,
 * and program the interrupt-delay multiplier encoding.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip MMIO if the HW is in an error state */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
245
/* Ring the completion-queue doorbell for CQ @qid: acknowledge
 * @num_popped consumed completions and optionally re-arm the CQ.
 * Non-static: also used outside this file.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* Skip MMIO if the HW is in an error state */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
262
/* ndo_set_mac_address handler.
 *
 * Programs the new MAC into the firmware (when the interface is running),
 * deletes the previously-programmed MAC, and confirms activation by
 * querying the FW before committing the address to netdev->dev_addr.
 *
 * Returns 0 on success, -EADDRNOTAVAIL for an invalid address, -EPERM
 * when the FW did not activate the new MAC, or a FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
327
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328/* BE2 supports only v0 cmd */
329static void *hw_stats_from_cmd(struct be_adapter *adapter)
330{
331 if (BE2_chip(adapter)) {
332 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
333
334 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500335 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000336 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
337
338 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500339 } else {
340 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
341
342 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000343 }
344}
345
346/* BE2 supports only v0 cmd */
347static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
348{
349 if (BE2_chip(adapter)) {
350 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
351
352 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500353 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000354 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
355
356 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500357 } else {
358 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
359
360 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000361 }
362}
363
/* Copy the v0 (BE2) firmware stats response into the driver's
 * chip-independent drv_stats, byte-swapping the response in place first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* Response arrives little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filtering separately; fold them */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are per-port in the rxf block on v0 */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
412
/* Copy the v1 (BE3) firmware stats response into the driver's
 * chip-independent drv_stats, byte-swapping the response in place first.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* Response arrives little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
458
/* Copy the v2 (Skyhawk-era) firmware stats response into the driver's
 * chip-independent drv_stats; v2 additionally carries RoCE counters.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	/* Response arrives little-endian; convert in place */
	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	/* RoCE counters are only meaningful on RoCE-capable functions */
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
512
/* Copy the Lancer per-port (pport) stats response into the driver's
 * chip-independent drv_stats, byte-swapping the response in place first.
 * "_lo" fields are the low 32 bits of 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* Response arrives little-endian; convert in place */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address and vlan filtering separately; fold them */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000549
Sathya Perla09c1c682011-08-22 19:41:53 +0000550static void accumulate_16bit_val(u32 *acc, u16 val)
551{
552#define lo(x) (x & 0xFFFF)
553#define hi(x) (x & 0xFFFF0000)
554 bool wrapped = val < lo(*acc);
555 u32 newacc = hi(*acc) + val;
556
557 if (wrapped)
558 newacc += 65536;
559 ACCESS_ONCE(*acc) = newacc;
560}
561
Jingoo Han4188e7d2013-08-05 18:02:02 +0900562static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530563 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000564{
565 if (!BEx_chip(adapter))
566 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
567 else
568 /* below erx HW counter can actually wrap around after
569 * 65535. Driver accumulates a 32-bit value
570 */
571 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
572 (u16)erx_stat);
573}
574
/* Parse the most recent firmware stats response into drv_stats,
 * dispatching on chip family, and refresh the per-RX-queue ERX drop
 * counters for non-Lancer chips. Non-static: also used outside this file.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
600
/* ndo_get_stats64() handler: aggregates the SW-maintained per-queue RX/TX
 * pkt/byte counters and the HW error counters (drv_stats) into @stats.
 * Per-queue 64-bit counters are read under a u64_stats fetch/retry loop
 * so the reads are consistent on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
668
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000669void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 struct net_device *netdev = adapter->netdev;
672
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000673 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000674 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000675 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000677
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530678 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000679 netif_carrier_on(netdev);
680 else
681 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200682
683 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684}
685
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500686static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687{
Sathya Perla3c8def92011-06-12 20:01:58 +0000688 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530689 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000690
Sathya Perlaab1594e2011-07-25 19:10:15 +0000691 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000692 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500693 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530694 stats->tx_pkts += tx_pkts;
695 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
696 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000697 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698}
699
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500700/* Returns number of WRBs needed for the skb */
701static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500703 /* +1 for the header wrb */
704 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705}
706
707static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
708{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500709 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
710 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
711 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
712 wrb->rsvd0 = 0;
713}
714
715/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
716 * to avoid the swap and shift/mask operations in wrb_fill().
717 */
718static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
719{
720 wrb->frag_pa_hi = 0;
721 wrb->frag_pa_lo = 0;
722 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000723 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700724}
725
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000726static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530727 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000728{
729 u8 vlan_prio;
730 u16 vlan_tag;
731
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100732 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000733 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
734 /* If vlan priority provided by OS is NOT in available bmap */
735 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
736 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500737 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000738
739 return vlan_tag;
740}
741
Sathya Perlac9c47142014-03-27 10:46:19 +0530742/* Used only for IP tunnel packets */
743static u16 skb_inner_ip_proto(struct sk_buff *skb)
744{
745 return (inner_ip_hdr(skb)->version == 4) ?
746 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
747}
748
749static u16 skb_ip_proto(struct sk_buff *skb)
750{
751 return (ip_hdr(skb)->version == 4) ?
752 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
753}
754
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530755static inline bool be_is_txq_full(struct be_tx_obj *txo)
756{
757 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
758}
759
760static inline bool be_can_txq_wake(struct be_tx_obj *txo)
761{
762 return atomic_read(&txo->q.used) < txo->q.len / 2;
763}
764
765static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
766{
767 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
768}
769
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530770static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
771 struct sk_buff *skb,
772 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530774 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700775
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000776 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, LSO, 1);
778 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000779 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530780 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530782 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530784 proto = skb_inner_ip_proto(skb);
785 } else {
786 proto = skb_ip_proto(skb);
787 }
788 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530789 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530790 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530791 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700792 }
793
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100794 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530795 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
796 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700797 }
798
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530799 BE_WRB_F_SET(wrb_params->features, CRC, 1);
800}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500801
/* Fill the TX header WRB from the gathered wrb_params for @skb.
 * All fields are written in CPU byte-order; the caller converts the whole
 * header to little-endian (see be_tx_setup_wrb_hdr()).
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* LSO (TSO) settings */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit additionally routes the pkt to the BMC (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
838
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000839static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530840 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000841{
842 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500843 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000844
Sathya Perla7101e112010-03-22 20:41:12 +0000845
Sathya Perlaf986afc2015-02-06 08:18:43 -0500846 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
847 (u64)le32_to_cpu(wrb->frag_pa_lo);
848 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000849 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500850 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000851 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500852 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000853 }
854}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530856/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530857static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530859 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530861 queue_head_inc(&txo->q);
862 return head;
863}
864
/* Write the header WRB (reserved earlier at index @head by
 * be_tx_get_wrb_hdr()) for @skb and account the packet's WRBs in the TXQ.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* hdr is consumed by HW: convert to little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* remember the skb so it can be freed on tx-completion */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700885
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530886/* Setup a WRB fragment (buffer descriptor) for xmit */
887static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
888 int len)
889{
890 struct be_eth_wrb *wrb;
891 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700892
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530893 wrb = queue_head_node(txq);
894 wrb_fill(wrb, busaddr, len);
895 queue_head_inc(txq);
896}
897
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u32 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind the producer to this packet's hdr wrb to walk its frags */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first (linear) frag was dma_map_single()'d */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index to the pre-enqueue position */
	txq->head = head;
}
925
926/* Enqueue the given packet for transmit. This routine allocates WRBs for the
927 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
928 * of WRBs used up by the packet.
929 */
930static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
931 struct sk_buff *skb,
932 struct be_wrb_params *wrb_params)
933{
934 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
935 struct device *dev = &adapter->pdev->dev;
936 struct be_queue_info *txq = &txo->q;
937 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530938 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530939 dma_addr_t busaddr;
940 int len;
941
942 head = be_tx_get_wrb_hdr(txo);
943
944 if (skb->len > skb->data_len) {
945 len = skb_headlen(skb);
946
947 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
948 if (dma_mapping_error(dev, busaddr))
949 goto dma_err;
950 map_single = true;
951 be_tx_setup_wrb_frag(txo, busaddr, len);
952 copied += len;
953 }
954
955 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
956 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
957 len = skb_frag_size(frag);
958
959 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
960 if (dma_mapping_error(dev, busaddr))
961 goto dma_err;
962 be_tx_setup_wrb_frag(txo, busaddr, len);
963 copied += len;
964 }
965
966 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
967
968 be_tx_stats_update(txo, skb);
969 return wrb_cnt;
970
971dma_err:
972 adapter->drv_stats.dma_map_errors++;
973 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000974 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700975}
976
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500977static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
978{
979 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
980}
981
Somnath Kotur93040ae2012-06-26 22:32:10 +0000982static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000983 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530984 struct be_wrb_params
985 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000986{
987 u16 vlan_tag = 0;
988
989 skb = skb_share_check(skb, GFP_ATOMIC);
990 if (unlikely(!skb))
991 return skb;
992
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100993 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000994 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530995
996 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
997 if (!vlan_tag)
998 vlan_tag = adapter->pvid;
999 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1000 * skip VLAN insertion
1001 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301002 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301003 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001004
1005 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001006 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1007 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001008 if (unlikely(!skb))
1009 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001010 skb->vlan_tci = 0;
1011 }
1012
1013 /* Insert the outer VLAN, if any */
1014 if (adapter->qnq_vid) {
1015 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001016 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1017 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001018 if (unlikely(!skb))
1019 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301020 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001021 }
1022
Somnath Kotur93040ae2012-06-26 22:32:10 +00001023 return skb;
1024}
1025
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001026static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1027{
1028 struct ethhdr *eh = (struct ethhdr *)skb->data;
1029 u16 offset = ETH_HLEN;
1030
1031 if (eh->h_proto == htons(ETH_P_IPV6)) {
1032 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1033
1034 offset += sizeof(struct ipv6hdr);
1035 if (ip6h->nexthdr != NEXTHDR_TCP &&
1036 ip6h->nexthdr != NEXTHDR_UDP) {
1037 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301038 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001039
1040 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1041 if (ehdr->hdrlen == 0xff)
1042 return true;
1043 }
1044 }
1045 return false;
1046}
1047
1048static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1049{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001050 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051}
1052
Sathya Perla748b5392014-05-09 13:29:13 +05301053static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001054{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001055 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001056}
1057
/* BEx/Lancer TX HW-bug workarounds; may modify, replace or drop @skb.
 * Returns the skb to transmit, or NULL if the pkt was dropped (and freed).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the pkt back to eth_hdr + IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1126
/* Apply all TX-path workarounds that may replace, trim or drop the skb:
 * runt-pkt padding, BEx/Lancer fixups, and over-length trimming.
 * Returns the (possibly reallocated) skb, or NULL if it was dropped/freed.
 */
static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
					   struct sk_buff *skb,
					   struct be_wrb_params *wrb_params)
{
	int err;

	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
	 * packets that are 32b or less may cause a transmit stall
	 * on that port. The workaround is to pad such packets
	 * (len <= 32 bytes) to a minimum length of 36b.
	 */
	if (skb->len <= 32) {
		/* skb_put_padto() frees the skb on failure */
		if (skb_put_padto(skb, 36))
			return NULL;
	}

	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
		if (!skb)
			return NULL;
	}

	/* The stack can send us skbs with length greater than
	 * what the HW can handle. Trim the extra bytes.
	 */
	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
	WARN_ON(err);

	return skb;
}
1158
/* Ring the TX doorbell for all WRBs queued since the last notify.
 * The last request is made eventable so a TX completion is raised, and a
 * dummy WRB is appended when needed to keep the notified WRB count even
 * (non-Lancer HW requirement).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* bump the last request's num_wrb field to cover the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1182
/* OS2BMC related: helpers deciding which pkts must also reach the BMC */

#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1	137
#define NET_BIOS_PORT2	138
#define DHCPV6_RAS_PORT	547

#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* Use ether_addr_equal(): compare_ether_addr() has been removed from the
 * kernel API (ether_addr_equal(a, b) == !compare_ether_addr(a, b)).
 */
#define is_broadcast_packet(eh, adapter)	\
	(is_multicast_ether_addr(eh->h_dest) &&	\
	 ether_addr_equal(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1236
/* Decide whether @skb must also be delivered to the BMC (OS2BMC).
 * Only multicast/broadcast pkts of types the BMC has filters registered
 * for (ARP, DHCP, NetBIOS, IPv6 RA/NA, DHCPv6-RAS) qualify.
 * May replace *skb: for BMC-bound pkts the VLAN tag is inlined.
 * Returns true if the caller should enqueue the pkt a 2nd time with the
 * mgmt bit set.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* BMC sees only multicast/broadcast traffic */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 router/neighbour advertisements */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS, keyed on the UDP destination port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1306
/* ndo_start_xmit() handler: apply HW workarounds, enqueue the pkt's WRBs
 * on the mapped TXQ, and ring the TX doorbell (deferred while the stack
 * signals more pkts are coming via xmit_more).
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;	/* skb was already freed by the workarounds */

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* extra ref: the skb completes twice (wire + BMC) */
			skb_get(skb);
	}

	/* stop the subqueue before the next pkt could overflow the TXQ */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1357
1358static int be_change_mtu(struct net_device *netdev, int new_mtu)
1359{
1360 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301361 struct device *dev = &adapter->pdev->dev;
1362
1363 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1364 dev_info(dev, "MTU must be between %d and %d bytes\n",
1365 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001366 return -EINVAL;
1367 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301368
1369 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301370 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001371 netdev->mtu = new_mtu;
1372 return 0;
1373}
1374
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001375static inline bool be_in_all_promisc(struct be_adapter *adapter)
1376{
1377 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1378 BE_IF_FLAGS_ALL_PROMISCUOUS;
1379}
1380
1381static int be_set_vlan_promisc(struct be_adapter *adapter)
1382{
1383 struct device *dev = &adapter->pdev->dev;
1384 int status;
1385
1386 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1387 return 0;
1388
1389 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1390 if (!status) {
1391 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1392 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1393 } else {
1394 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1395 }
1396 return status;
1397}
1398
1399static int be_clear_vlan_promisc(struct be_adapter *adapter)
1400{
1401 struct device *dev = &adapter->pdev->dev;
1402 int status;
1403
1404 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1405 if (!status) {
1406 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1407 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1408 }
1409 return status;
1410}
1411
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001413 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1414 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001415 */
/* Program the i/f's HW VLAN filter table with the currently configured
 * vids. Falls back to VLAN-promiscuous mode when more vids are configured
 * than HW supports, or when programming fails for lack of resources.
 * Returns 0 on success or a FW cmd status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to change the VLAN state if the I/F is in promiscuous */
	if (adapter->netdev->flags & IFF_PROMISC)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Leave VLAN-promisc before programming an explicit vid list */
	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		status = be_clear_vlan_promisc(adapter);
		if (status)
			return status;
	}
	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	}
	return status;
}
1450
Patrick McHardy80d5c362013-04-19 02:04:28 +00001451static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452{
1453 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001454 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001456 /* Packets with VID 0 are always received by Lancer by default */
1457 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301458 return status;
1459
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301460 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301461 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001462
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301463 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301464 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001465
Sathya Perla0aff1fb2016-07-27 05:26:16 -04001466 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001467}
1468
Patrick McHardy80d5c362013-04-19 02:04:28 +00001469static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470{
1471 struct be_adapter *adapter = netdev_priv(netdev);
1472
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001473 /* Packets with VID 0 are always received by Lancer by default */
1474 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301475 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001476
Sriharsha Basavapatna41dcdfb2016-02-03 09:49:18 +05301477 if (!test_bit(vid, adapter->vids))
1478 return 0;
1479
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301480 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301481 adapter->vlans_added--;
1482
1483 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001484}
1485
/* Unconditionally enable UC+MC promiscuous mode on the i/f and cache the
 * new filter state. The FW cmd's return status is not checked here.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1491
1492static void be_set_mc_promisc(struct be_adapter *adapter)
1493{
1494 int status;
1495
1496 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1497 return;
1498
1499 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1500 if (!status)
1501 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1502}
1503
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001504static void be_set_uc_promisc(struct be_adapter *adapter)
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001505{
1506 int status;
1507
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001508 if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1509 return;
1510
1511 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001512 if (!status)
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001513 adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1514}
1515
1516static void be_clear_uc_promisc(struct be_adapter *adapter)
1517{
1518 int status;
1519
1520 if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1521 return;
1522
1523 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1524 if (!status)
1525 adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1526}
1527
1528/* The below 2 functions are the callback args for __dev_mc_sync/dev_uc_sync().
1529 * We use a single callback function for both sync and unsync. We really don't
1530 * add/remove addresses through this callback. But, we use it to detect changes
1531 * to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1532 */
1533static int be_uc_list_update(struct net_device *netdev,
1534 const unsigned char *addr)
1535{
1536 struct be_adapter *adapter = netdev_priv(netdev);
1537
1538 adapter->update_uc_list = true;
1539 return 0;
1540}
1541
1542static int be_mc_list_update(struct net_device *netdev,
1543 const unsigned char *addr)
1544{
1545 struct be_adapter *adapter = netdev_priv(netdev);
1546
1547 adapter->update_mc_list = true;
1548 return 0;
1549}
1550
/* Sync the kernel's mc list to the i/f and choose between programming the
 * explicit mc list in HW and entering/leaving mc-promiscuous mode.
 */
static void be_set_mc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	bool mc_promisc = false;
	int status;

	/* Only detects list changes (via be_mc_list_update); programming
	 * happens below.
	 */
	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_mc_list = false;
	} else if (netdev->flags & IFF_ALLMULTI ||
		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
		/* Enable multicast promisc if num configured exceeds
		 * what we support
		 */
		mc_promisc = true;
		adapter->update_mc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
		/* Update mc-list unconditionally if the iface was previously
		 * in mc-promisc mode and now is out of that mode.
		 */
		adapter->update_mc_list = true;
	}

	if (mc_promisc) {
		be_set_mc_promisc(adapter);
	} else if (adapter->update_mc_list) {
		/* On failure to program the list, fall back to mc-promisc */
		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
		if (!status)
			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
		else
			be_set_mc_promisc(adapter);

		adapter->update_mc_list = false;
	}
}
1587
1588static void be_clear_mc_list(struct be_adapter *adapter)
1589{
1590 struct net_device *netdev = adapter->netdev;
1591
1592 __dev_mc_unsync(netdev, NULL);
1593 be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001594}
1595
/* Sync the kernel's uc list to the i/f: program each address as a pmac
 * entry in HW, or fall back to uc-promiscuous mode when the list exceeds
 * what HW supports. pmac slot 0 is reserved for the primary MAC.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct netdev_hw_addr *ha;
	bool uc_promisc = false;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Only detects list changes (via be_uc_list_update); programming
	 * happens below.
	 */
	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);

	if (netdev->flags & IFF_PROMISC) {
		adapter->update_uc_list = false;
	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
		uc_promisc = true;
		adapter->update_uc_list = false;
	} else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
		/* Update uc-list unconditionally if the iface was previously
		 * in uc-promisc mode and now is out of that mode.
		 */
		adapter->update_uc_list = true;
	}

	if (uc_promisc) {
		be_set_uc_promisc(adapter);
	} else if (adapter->update_uc_list) {
		be_clear_uc_promisc(adapter);

		/* Delete all previously programmed secondary pmac entries */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);

		/* Re-program the current uc list from pmac slot 1 onwards */
		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter,
					(u8 *)ha->addr, adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
		adapter->update_uc_list = false;
	}
}
1635
1636static void be_clear_uc_list(struct be_adapter *adapter)
1637{
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001638 struct net_device *netdev = adapter->netdev;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001639 int i;
1640
Sriharsha Basavapatna92fbb1d2016-07-27 05:26:17 -04001641 __dev_uc_unsync(netdev, NULL);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001642 for (i = 1; i < (adapter->uc_macs + 1); i++)
1643 be_cmd_pmac_del(adapter, adapter->if_handle,
1644 adapter->pmac_id[i], 0);
1645 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301646}
1647
/* ndo_set_rx_mode handler: reconcile the i/f's promiscuous/uc/mc filter
 * state with the netdev flags and address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		if (!be_in_all_promisc(adapter))
			be_set_all_promisc(adapter);
	} else if (be_in_all_promisc(adapter)) {
		/* We need to re-program the vlan-list or clear
		 * vlan-promisc mode (if needed) when the interface
		 * comes out of promisc mode.
		 */
		be_vid_config(adapter);
	}

	be_set_uc_list(adapter);
	be_set_mc_list(adapter);
}
1666
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On BEx chips the old pmac entry is deleted and a new one added; on
 * newer chips a single set_mac FW cmd does the swap.
 * Returns 0 on success or a negative errno.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* del status is not checked; add below reports failure */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC only after the FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1706
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001707static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301708 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001709{
1710 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001711 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001712
Sathya Perla11ac75e2011-12-13 00:58:50 +00001713 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001714 return -EPERM;
1715
Sathya Perla11ac75e2011-12-13 00:58:50 +00001716 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001717 return -EINVAL;
1718
1719 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001720 vi->max_tx_rate = vf_cfg->tx_rate;
1721 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001722 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1723 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001724 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301725 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001726 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001727
1728 return 0;
1729}
1730
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * While TVT is on, any guest-programmed VLAN filters are cleared and the
 * VF's FILTMGMT privilege is revoked so it cannot program new ones.
 * The two follow-up steps are best-effort: the function returns 0 once
 * TVT itself was enabled.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1759
/* Disable Transparent VLAN Tagging on VF @vf and restore its ability to
 * program its own VLAN filters (FILTMGMT privilege). The privilege
 * restoration is best-effort; 0 is returned once TVT itself was reset.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1786
/* ndo_set_vf_vlan handler: a non-zero @vlan or @qos enables transparent
 * VLAN tagging with that tag/priority on VF @vf; vlan==0 && qos==0
 * disables it. Returns 0 on success or a negative errno.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;	/* fold priority into the tag */
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	/* Cache only after the FW accepted the config */
	vf_cfg->vlan_tag = vlan;
	return 0;
}
1816
/* ndo_set_vf_rate handler: program @max_tx_rate (Mbps) as the TX rate
 * limit of VF @vf. @min_tx_rate is not supported; a max rate of 0
 * removes the limit. Returns 0 on success or a negative errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* A rate of 0 skips validation and clears the limit */
	if (!max_tx_rate)
		goto config_qos;

	/* The requested limit is validated against the current link speed */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache only after the FW accepted the new limit */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301878
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301879static int be_set_vf_link_state(struct net_device *netdev, int vf,
1880 int link_state)
1881{
1882 struct be_adapter *adapter = netdev_priv(netdev);
1883 int status;
1884
1885 if (!sriov_enabled(adapter))
1886 return -EPERM;
1887
1888 if (vf >= adapter->num_vfs)
1889 return -EINVAL;
1890
1891 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301892 if (status) {
1893 dev_err(&adapter->pdev->dev,
1894 "Link state change on VF %d failed: %#x\n", vf, status);
1895 return be_cmd_status(status);
1896 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301897
Kalesh APabccf232014-07-17 16:20:24 +05301898 adapter->vf_cfg[vf].plink_tracking = link_state;
1899
1900 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301901}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001902
Kalesh APe7bcbd72015-05-06 05:30:32 -04001903static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1904{
1905 struct be_adapter *adapter = netdev_priv(netdev);
1906 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1907 u8 spoofchk;
1908 int status;
1909
1910 if (!sriov_enabled(adapter))
1911 return -EPERM;
1912
1913 if (vf >= adapter->num_vfs)
1914 return -EINVAL;
1915
1916 if (BEx_chip(adapter))
1917 return -EOPNOTSUPP;
1918
1919 if (enable == vf_cfg->spoofchk)
1920 return 0;
1921
1922 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1923
1924 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1925 0, spoofchk);
1926 if (status) {
1927 dev_err(&adapter->pdev->dev,
1928 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1929 return be_cmd_status(status);
1930 }
1931
1932 vf_cfg->spoofchk = enable;
1933 return 0;
1934}
1935
Sathya Perla2632baf2013-10-01 16:00:00 +05301936static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1937 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001938{
Sathya Perla2632baf2013-10-01 16:00:00 +05301939 aic->rx_pkts_prev = rx_pkts;
1940 aic->tx_reqs_prev = tx_pkts;
1941 aic->jiffies = now;
1942}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001943
/* Compute a new event-queue interrupt delay for @eqo from the aggregate
 * rx+tx packet rate observed since the last call (adaptive interrupt
 * coalescing). Falls back to the previous value when the rate can't be
 * computed, or to the fixed ethtool-configured delay when AIC is off.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;	/* fixed, ethtool-configured delay */
		return eqd;
	}

	/* Sum pkt counts over all rx/tx queues served by this EQ; the
	 * u64_stats loops retry if a writer updates the stats meanwhile.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* packets-per-second over the elapsed interval */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	/* Clamp to the ethtool-configured min/max coalescing range */
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
2004
2005/* For Skyhawk-R only */
2006static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2007{
2008 struct be_adapter *adapter = eqo->adapter;
2009 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2010 ulong now = jiffies;
2011 int eqd;
2012 u32 mult_enc;
2013
2014 if (!aic->enable)
2015 return 0;
2016
Padmanabh Ratnakar3c0d49a2016-02-03 09:49:23 +05302017 if (jiffies_to_msecs(now - aic->jiffies) < 1)
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002018 eqd = aic->prev_eqd;
2019 else
2020 eqd = be_get_new_eqd(eqo);
2021
2022 if (eqd > 100)
2023 mult_enc = R2I_DLY_ENC_1;
2024 else if (eqd > 60)
2025 mult_enc = R2I_DLY_ENC_2;
2026 else if (eqd > 20)
2027 mult_enc = R2I_DLY_ENC_3;
2028 else
2029 mult_enc = R2I_DLY_ENC_0;
2030
2031 aic->prev_eqd = eqd;
2032
2033 return mult_enc;
2034}
2035
/* Recompute the adaptive interrupt delay for every EQ and push the new
 * values to the FW in one modify_eqd cmd, for those that changed (or for
 * all of them when @force_update is set).
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* NOTE(review): eqd is scaled by 65/100 into the
			 * multiplier units the FW cmd expects — confirm
			 * against the FW spec.
			 */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
2057
/* Update the per-RX-queue SW stats from one RX completion @rxcp, inside
 * a u64_stats write section so 64-bit readers see consistent values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
2075
Sathya Perla2e588f82011-03-11 02:49:26 +00002076static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07002077{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00002078 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05302079 * Also ignore ipcksm for ipv6 pkts
2080 */
Sathya Perla2e588f82011-03-11 02:49:26 +00002081 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05302082 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07002083}
2084
/* Pop the RX frag at the RXQ tail and return its page-info entry.
 * The frag was DMA-mapped when posted (see be_post_rx_frags()): the
 * compound page is unmapped only when its last frag is consumed;
 * for earlier frags only a CPU sync of the frag region is done.
 * Decrements rxq->used and advances the tail.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	/* A posted frag must always have a page attached */
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the big page: tear down the whole mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared with later frags: sync just this frag */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2110
2111/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002112static void be_rx_compl_discard(struct be_rx_obj *rxo,
2113 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002114{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002115 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002116 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002117
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002118 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302119 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002120 put_page(page_info->page);
2121 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122 }
2123}
2124
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first frag is copied (fully for tiny packets, header-only
 * otherwise) into the skb's linear area; remaining data stays in the
 * RX pages which are attached as skb frags.  Consecutive frags that
 * live in the same physical page are coalesced into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the payload stays in the page, attached as frag 0.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if any) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-frag packet: nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2199
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the RX frags and hands it to the
 * stack via netif_receive_skb().  On skb-allocation failure the
 * completion's frags are discarded and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Honour the HW checksum only when RXCSUM is enabled and the
	 * completion flags say it can be trusted (see csum_passed()).
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	/* Attach the (inner) vlan tag stripped by HW, if any */
	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2235
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Uses napi_get_frags() to obtain an skb and attaches the RX pages
 * directly as frags (no copy into the linear area), coalescing frags
 * from the same physical page, then passes it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop this completion's frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for completions with good HW csum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	/* Attach the vlan tag stripped by HW, if any */
	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2292
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002293static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2294 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302296 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2297 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2298 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2299 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2300 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2301 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2302 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2303 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2304 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2305 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2306 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002307 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302308 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2309 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002310 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302311 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302312 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302313 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002314}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002315
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002316static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2317 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002318{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302319 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2320 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2321 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2322 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2323 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2324 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2325 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2326 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2327 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2328 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2329 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002330 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302331 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2332 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002333 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302334 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2335 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002336}
2337
/* Fetch the next RX completion from this RXQ's CQ, or NULL if none is
 * pending.  The HW entry is parsed (v0 or v1 format) into rxo->rxcp,
 * the vlan fields are post-processed, and the CQ entry is invalidated
 * so it is not seen again.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Ensure the rest of the entry is read only after the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* Don't trust the L4 checksum indication for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* NOTE(review): byte-swap appears needed on non-Lancer
		 * chips only — confirm against HW docs
		 */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Clear vlanf when the tag matches the port's pvid and
		 * that vid is not configured on the host
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2382
Eric Dumazet1829b082011-03-01 05:48:12 +00002383static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002384{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002385 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002386
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002387 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002388 gfp |= __GFP_COMP;
2389 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002390}
2391
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Up to @frags_needed free RXQ slots are filled.  One big page is
 * DMA-mapped and shared by several consecutive frags (each frag takes
 * a page reference); the frag that exhausts the page is flagged
 * last_frag and stores the page's DMA address so get_rx_page_info()
 * can unmap it.  Posted frags are announced to HW via the RX doorbell
 * in batches of at most MAX_NUM_POST_ERX_DB.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Loop stops early when it reaches a slot whose page is still set,
	 * i.e. a buffer not yet consumed by HW
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next frag shares the current page: take a ref */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Write the frag's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the RX doorbell in bounded batches */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2474
/* Fetch the next TX completion from this TXQ's CQ, or NULL if none is
 * pending.  Parses status and the last wrb index into txo->txcp and
 * invalidates the CQ entry so it is not seen again.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Invalidate the entry so a stale completion is never re-read */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2495
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * each wrb's DMA buffer and freeing the completed skbs.  A non-NULL
 * sent_skbs[] slot marks the header wrb of a new request.  Returns the
 * number of wrbs processed so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Header data (skb_headlen) is unmapped only once, with
		 * the first frag wrb of each request
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2530
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002531/* Return the number of events in the event queue */
2532static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002533{
2534 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002535 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002536
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002537 do {
2538 eqe = queue_tail_node(&eqo->q);
2539 if (eqe->evt == 0)
2540 break;
2541
2542 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002543 eqe->evt = 0;
2544 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002545 queue_tail_inc(&eqo->q);
2546 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002547
2548 return num;
2549}
2550
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002551/* Leaves the EQ is disarmed state */
2552static void be_eq_clean(struct be_eq_obj *eqo)
2553{
2554 int num = events_get(eqo);
2555
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002556 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002557}
2558
Kalesh AP99b44302015-08-05 03:27:49 -04002559/* Free posted rx buffers that were not used */
2560static void be_rxq_clean(struct be_rx_obj *rxo)
2561{
2562 struct be_queue_info *rxq = &rxo->q;
2563 struct be_rx_page_info *page_info;
2564
2565 while (atomic_read(&rxq->used) > 0) {
2566 page_info = get_rx_page_info(rxo);
2567 put_page(page_info->page);
2568 memset(page_info, 0, sizeof(*page_info));
2569 }
2570 BUG_ON(atomic_read(&rxq->used));
2571 rxq->tail = 0;
2572 rxq->head = 0;
2573}
2574
/* Drain the RX CQ, discarding every pending completion, and wait for
 * the HW flush completion (except on Lancer).  Leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2614
/* Drain all pending TX completions and reclaim TX WRBs before queue teardown.
 * First polls each TX-CQ until the HW has been silent for 10ms (or a HW error
 * is detected), then force-frees any WRBs that were posted to the TXQ but
 * never notified to the HW, resetting the queue indices accordingly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			/* Reap every completion currently in this TX-CQ */
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW still active; restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		/* Exit when all TXQs are drained, 10ms of silence elapsed,
		 * or the HW is in an error state
		 */
		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			/* end_idx := index of the last pending WRB */
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2681
/* Tear down all event queues: clean and destroy the HW EQ, unregister the
 * NAPI context and free the per-EQ affinity mask, then release queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Flush residual events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* Queue memory is freed even if the HW queue was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2698
2699static int be_evt_queues_create(struct be_adapter *adapter)
2700{
2701 struct be_queue_info *eq;
2702 struct be_eq_obj *eqo;
Sathya Perla2632baf2013-10-01 16:00:00 +05302703 struct be_aic_obj *aic;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002704 int i, rc;
2705
Sathya Perlae2617682016-06-22 08:54:54 -04002706 /* need enough EQs to service both RX and TX queues */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302707 adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
Sathya Perlae2617682016-06-22 08:54:54 -04002708 max(adapter->cfg_num_rx_irqs,
2709 adapter->cfg_num_tx_irqs));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002710
2711 for_all_evt_queues(adapter, eqo, i) {
Rusty Russellf36963c2015-05-09 03:14:13 +09302712 int numa_node = dev_to_node(&adapter->pdev->dev);
Kalesh AP649886a2015-08-05 03:27:50 -04002713
Sathya Perla2632baf2013-10-01 16:00:00 +05302714 aic = &adapter->aic_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002715 eqo->adapter = adapter;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002716 eqo->idx = i;
Sathya Perla2632baf2013-10-01 16:00:00 +05302717 aic->max_eqd = BE_MAX_EQD;
2718 aic->enable = true;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002719
2720 eq = &eqo->q;
2721 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302722 sizeof(struct be_eq_entry));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002723 if (rc)
2724 return rc;
2725
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302726 rc = be_cmd_eq_create(adapter, eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002727 if (rc)
2728 return rc;
Kalesh AP649886a2015-08-05 03:27:50 -04002729
2730 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2731 return -ENOMEM;
2732 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2733 eqo->affinity_mask);
2734 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2735 BE_NAPI_WEIGHT);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002736 }
Sathya Perla1cfafab2012-02-23 18:50:15 +00002737 return 0;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002738}
2739
Sathya Perla5fb379e2009-06-18 00:02:59 +00002740static void be_mcc_queues_destroy(struct be_adapter *adapter)
2741{
2742 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002743
Sathya Perla8788fdc2009-07-27 22:52:03 +00002744 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002745 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002746 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002747 be_queue_free(adapter, q);
2748
Sathya Perla8788fdc2009-07-27 22:52:03 +00002749 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002750 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002751 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002752 be_queue_free(adapter, q);
2753}
2754
2755/* Must be called only after TX qs are created as MCC shares TX EQ */
2756static int be_mcc_queues_create(struct be_adapter *adapter)
2757{
2758 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002759
Sathya Perla8788fdc2009-07-27 22:52:03 +00002760 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002761 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302762 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002763 goto err;
2764
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002765 /* Use the default EQ for MCC completions */
2766 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002767 goto mcc_cq_free;
2768
Sathya Perla8788fdc2009-07-27 22:52:03 +00002769 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002770 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2771 goto mcc_cq_destroy;
2772
Sathya Perla8788fdc2009-07-27 22:52:03 +00002773 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002774 goto mcc_q_free;
2775
2776 return 0;
2777
2778mcc_q_free:
2779 be_queue_free(adapter, q);
2780mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002781 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002782mcc_cq_free:
2783 be_queue_free(adapter, cq);
2784err:
2785 return -1;
2786}
2787
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002788static void be_tx_queues_destroy(struct be_adapter *adapter)
2789{
2790 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002791 struct be_tx_obj *txo;
2792 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002793
Sathya Perla3c8def92011-06-12 20:01:58 +00002794 for_all_tx_queues(adapter, txo, i) {
2795 q = &txo->q;
2796 if (q->created)
2797 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2798 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002799
Sathya Perla3c8def92011-06-12 20:01:58 +00002800 q = &txo->cq;
2801 if (q->created)
2802 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2803 be_queue_free(adapter, q);
2804 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002805}
2806
/* Create the TX queues and their completion queues.
 *
 * num_tx_qs is capped by both the number of event queues and the configured
 * TX IRQ count. For each TXQ: allocate + create its CQ (bound to an EQ),
 * allocate + create the TXQ itself, and map the netdev's XPS queue to the
 * EQ's CPU affinity mask.
 *
 * Returns 0 on success or the first failing command's status.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmit-queue selection (XPS) to the CPUs this
		 * queue's EQ is affined to
		 */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2851
2852static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002853{
2854 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002855 struct be_rx_obj *rxo;
2856 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002857
Sathya Perla3abcded2010-10-03 22:12:27 -07002858 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002859 q = &rxo->cq;
2860 if (q->created)
2861 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2862 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002863 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002864}
2865
/* Decide the RX ring layout and create the RX completion queues.
 *
 * RSS rings are capped by the EQ count and configured RX IRQs; RSS is only
 * used when at least 2 RSS rings are possible. A default (non-RSS) RXQ is
 * added when needed, and at least one RXQ always exists. Each RX-CQ is
 * bound round-robin to an event queue.
 *
 * Returns 0 on success or the first failing allocation/command status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rss_qs =
			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs < 2)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	/* Size used when posting RX frags in compound pages */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Bind this RX-CQ to an EQ, round-robin over the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2907
/* Legacy INTx interrupt handler: counts EQ events, schedules NAPI and
 * tracks spurious interrupts so the kernel does not disable the IRQ line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the events without re-arming; be_poll() will re-arm the EQ */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2939
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002940static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002941{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002942 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002943
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002944 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00002945 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002946 return IRQ_HANDLED;
2947}
2948
Sathya Perla2e588f82011-03-11 02:49:26 +00002949static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002950{
Somnath Koture38b1702013-05-29 22:55:56 +00002951 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002952}
2953
/* Process up to @budget RX completions from an RX object's CQ.
 *
 * Flush completions, partial-DMA completions (Lancer B0) and mis-filtered
 * promiscuous-mode packets (BE) are discarded; the rest go through GRO
 * (unless busy-polling) or the regular RX path. Consumed frags are reposted
 * unless the queue is in the post_starved state.
 *
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Stats are updated even for discarded completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack processed completions and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
3013
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303014static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303015{
3016 switch (status) {
3017 case BE_TX_COMP_HDR_PARSE_ERR:
3018 tx_stats(txo)->tx_hdr_parse_err++;
3019 break;
3020 case BE_TX_COMP_NDMA_ERR:
3021 tx_stats(txo)->tx_dma_err++;
3022 break;
3023 case BE_TX_COMP_ACL_ERR:
3024 tx_stats(txo)->tx_spoof_check_err++;
3025 break;
3026 }
3027}
3028
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05303029static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05303030{
3031 switch (status) {
3032 case LANCER_TX_COMP_LSO_ERR:
3033 tx_stats(txo)->tx_tso_err++;
3034 break;
3035 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
3036 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
3037 tx_stats(txo)->tx_spoof_check_err++;
3038 break;
3039 case LANCER_TX_COMP_QINQ_ERR:
3040 tx_stats(txo)->tx_qinq_err++;
3041 break;
3042 case LANCER_TX_COMP_PARITY_ERR:
3043 tx_stats(txo)->tx_internal_parity_err++;
3044 break;
3045 case LANCER_TX_COMP_DMA_ERR:
3046 tx_stats(txo)->tx_dma_err++;
3047 break;
3048 }
3049}
3050
/* Reap all TX completions on one TX object (called from be_poll()).
 *
 * Frees the WRBs behind each completion, records per-status error stats,
 * acks + re-arms the TX-CQ, and wakes the netdev subqueue @idx if it was
 * stopped for lack of WRBs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Non-zero status => TX error; count it per chip family */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00003085
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Per-EQ lock helpers arbitrating between NAPI polling and busy-polling.
 * eqo->state tracks who owns the EQ (NAPI, busy-poll, idle) and whether the
 * other side yielded while the EQ was locked. When busy-poll support is
 * compiled out, the stubs below make NAPI always win and busy-poll a no-op.
 */

/* Try to claim the EQ for NAPI; returns false if busy-poll holds it
 * (recording a NAPI yield).
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing; state returns to idle */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling; returns false if NAPI holds it
 * (recording a poll yield).
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-polling; state returns to idle */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ lock/state before busy-poll can be used */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until the EQ can be claimed for NAPI, shutting out busy-poll */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Stubs: without busy-poll support NAPI always gets the EQ */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3185
/* Main NAPI poll handler for one event queue.
 *
 * Reaps TX completions for every TXQ on this EQ, processes RX completions
 * (unless busy-poll holds the EQ, in which case the full budget is reported
 * to stay in polling mode), services MCC completions on the MCC EQ, and
 * finally either completes NAPI and re-arms the EQ (with an optional
 * Skyhawk delay-multiplier encoding) or stays in polling mode.
 *
 * Returns the RX work done (or @budget to continue polling).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ: claim the full budget so NAPI
		 * keeps polling instead of completing
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3234
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll handler (ndo_busy_poll).
 *
 * Tries to claim the EQ from NAPI; if NAPI holds it, returns LL_FLUSH_BUSY.
 * Otherwise processes a small batch (up to 4 completions) from the first
 * RXQ on this EQ that yields work and returns the amount processed.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3256
/* Detect fatal hardware errors by reading chip error registers.
 *
 * On Lancer, the SLIPORT status register is checked (suppressing log noise
 * during a FW reset). On BE/Skyhawk, the unmasked UE (Unrecoverable Error)
 * status bits are decoded and logged. Sets BE_ERROR_UE where appropriate;
 * returns immediately if a HW error was already flagged.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	/* Nothing to do if an error has already been detected */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Consider only the UE bits that are not masked off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3325
Sathya Perla8d56ff12009-11-22 22:02:26 +00003326static void be_msix_disable(struct be_adapter *adapter)
3327{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003328 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003329 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003330 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303331 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003332 }
3333}
3334
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003335static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003336{
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003337 unsigned int i, max_roce_eqs;
Sathya Perlad3791422012-09-28 04:39:44 +00003338 struct device *dev = &adapter->pdev->dev;
Dan Carpenter6fde0e62016-06-29 17:39:43 +03003339 int num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003340
Sathya Perlace7faf02016-06-22 08:54:53 -04003341 /* If RoCE is supported, program the max number of vectors that
3342 * could be used for NIC and RoCE, else, just program the number
3343 * we'll use initially.
Sathya Perla92bf14a2013-08-27 16:57:32 +05303344 */
Sathya Perlae2617682016-06-22 08:54:54 -04003345 if (be_roce_supported(adapter)) {
3346 max_roce_eqs =
3347 be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3348 max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3349 num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3350 } else {
3351 num_vec = max(adapter->cfg_num_rx_irqs,
3352 adapter->cfg_num_tx_irqs);
3353 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003354
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003355 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003356 adapter->msix_entries[i].entry = i;
3357
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003358 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3359 MIN_MSIX_VECTORS, num_vec);
3360 if (num_vec < 0)
3361 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00003362
Sathya Perla92bf14a2013-08-27 16:57:32 +05303363 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3364 adapter->num_msix_roce_vec = num_vec / 2;
3365 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3366 adapter->num_msix_roce_vec);
3367 }
3368
3369 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3370
3371 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3372 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003373 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003374
3375fail:
3376 dev_warn(dev, "MSIx enable failed\n");
3377
3378 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
Kalesh AP18c57c72015-05-06 05:30:38 -04003379 if (be_virtfn(adapter))
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01003380 return num_vec;
3381 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003382}
3383
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003384static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303385 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003386{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303387 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003388}
3389
3390static int be_msix_register(struct be_adapter *adapter)
3391{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003392 struct net_device *netdev = adapter->netdev;
3393 struct be_eq_obj *eqo;
3394 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003395
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003396 for_all_evt_queues(adapter, eqo, i) {
3397 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3398 vec = be_msix_vec_get(adapter, eqo);
3399 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003400 if (status)
3401 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003402
3403 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003404 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003405
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003406 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003407err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303408 for (i--; i >= 0; i--) {
3409 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003410 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303411 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003412 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303413 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003414 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003415 return status;
3416}
3417
3418static int be_irq_register(struct be_adapter *adapter)
3419{
3420 struct net_device *netdev = adapter->netdev;
3421 int status;
3422
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003423 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003424 status = be_msix_register(adapter);
3425 if (status == 0)
3426 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003427 /* INTx is not supported for VF */
Kalesh AP18c57c72015-05-06 05:30:38 -04003428 if (be_virtfn(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003429 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003430 }
3431
Sathya Perlae49cc342012-11-27 19:50:02 +00003432 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003433 netdev->irq = adapter->pdev->irq;
3434 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00003435 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003436 if (status) {
3437 dev_err(&adapter->pdev->dev,
3438 "INTx request IRQ failed - err %d\n", status);
3439 return status;
3440 }
3441done:
3442 adapter->isr_registered = true;
3443 return 0;
3444}
3445
3446static void be_irq_unregister(struct be_adapter *adapter)
3447{
3448 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003449 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003450 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003451
3452 if (!adapter->isr_registered)
3453 return;
3454
3455 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003456 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003457 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003458 goto done;
3459 }
3460
3461 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003462 for_all_evt_queues(adapter, eqo, i) {
3463 vec = be_msix_vec_get(adapter, eqo);
3464 irq_set_affinity_hint(vec, NULL);
3465 free_irq(vec, eqo);
3466 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003467
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003468done:
3469 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003470}
3471
/* Destroy all RX queues and free their memory.  Also turns RSS off in
 * the FW (when it was on), since the RXQs backing the RSS table are
 * going away.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			/* Destroy in FW first, then drain the CQ and the
			 * posted-buffer list for this ring.
			 */
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	if (rss->rss_flags) {
		/* Disable RSS in FW; 128 is the indir-table length passed
		 * for the disable cmd — NOTE(review): assumed from this
		 * call site, confirm against be_cmd_rss_config().
		 */
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3508
/* Undo be_enable_if_filters(): delete the primary MAC, flush the
 * UC/MC address lists and (Lancer only) clear the IFACE RX-filter
 * flags in FW.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);
	be_clear_mc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW.  As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3536
/* ndo_stop handler: quiesce and tear down the data path roughly in the
 * reverse order of be_open() — filters, NAPI, MCC events, TX, RX, EQs
 * and finally the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Let any in-flight handler for this EQ's interrupt finish
		 * before draining the EQ.
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3581
/* Allocate and create all RX queues (default + RSS rings), program the
 * RSS indirection table/flags/hash-key when multiple rings exist, and
 * pre-post receive buffers to every ring.
 * Returns 0 or an error from queue allocation / FW commands.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create the non-RSS default queue only when it is needed or when
	 * there are no RSS queues at all.
	 */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table round-robin across the RSS
		 * rings.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS flags are set only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		/* Remember the key actually programmed into the FW */
		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3652
Kalesh APbcc84142015-08-05 03:27:48 -04003653static int be_enable_if_filters(struct be_adapter *adapter)
3654{
3655 int status;
3656
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -05003657 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
Kalesh APbcc84142015-08-05 03:27:48 -04003658 if (status)
3659 return status;
3660
3661 /* For BE3 VFs, the PF programs the initial MAC address */
3662 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3663 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3664 adapter->if_handle,
3665 &adapter->pmac_id[0], 0);
3666 if (status)
3667 return status;
3668 }
3669
3670 if (adapter->vlans_added)
3671 be_vid_config(adapter);
3672
3673 be_set_rx_mode(adapter->netdev);
3674
3675 return 0;
3676}
3677
/* ndo_open handler: create the RX queues, program the filters, register
 * IRQs, arm all completion/event queues and enable NAPI before starting
 * the TX queues.  On any failure be_close() unwinds the partial setup.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the RX and TX completion queues so events start flowing */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Report the current link state; a query failure is non-fatal */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	/* Ask the stack to replay UDP tunnel ports (Skyhawk only) */
	if (skyhawk_chip(adapter))
		udp_tunnel_get_rx_info(netdev);

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3727
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003728static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3729{
3730 u32 addr;
3731
3732 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3733
3734 mac[5] = (u8)(addr & 0xFF);
3735 mac[4] = (u8)((addr >> 8) & 0xFF);
3736 mac[3] = (u8)((addr >> 16) & 0xFF);
3737 /* Use the OUI from the current MAC address */
3738 memcpy(mac, adapter->netdev->dev_addr, 3);
3739}
3740
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003741/*
3742 * Generate a seed MAC address from the PF MAC Address using jhash.
3743 * MAC Address for VFs are assigned incrementally starting from the seed.
3744 * These addresses are programmed in the ASIC by the PF and the VF driver
3745 * queries for the MAC address during its probe.
3746 */
Sathya Perla4c876612013-02-03 20:30:11 +00003747static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003748{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003749 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003750 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003751 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003752 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003753
3754 be_vf_eth_addr_generate(adapter, mac);
3755
Sathya Perla11ac75e2011-12-13 00:58:50 +00003756 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303757 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003758 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003759 vf_cfg->if_handle,
3760 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303761 else
3762 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3763 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003764
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003765 if (status)
3766 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303767 "Mac address assignment failed for VF %d\n",
3768 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003769 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003770 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003771
3772 mac[5] += 1;
3773 }
3774 return status;
3775}
3776
Sathya Perla4c876612013-02-03 20:30:11 +00003777static int be_vfs_mac_query(struct be_adapter *adapter)
3778{
3779 int status, vf;
3780 u8 mac[ETH_ALEN];
3781 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003782
3783 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303784 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3785 mac, vf_cfg->if_handle,
3786 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003787 if (status)
3788 return status;
3789 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3790 }
3791 return 0;
3792}
3793
/* Tear down SR-IOV: disable VFs in the PCI layer, remove each VF's MAC
 * and IFACE, restore BE3 port-forwarding, and free the per-VF config
 * array.  When VFs are still assigned to VMs only the bookkeeping is
 * released; the VF resources themselves are left alone.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the VF MAC via proxied pmac-del; newer chips
		 * clear it with set-mac(NULL).
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}

	/* BE3: restore the default pass-through port-forwarding mode */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0,
				      adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3827
/* Destroy all adapter queues: MCC, RX completion queues, TX queues and,
 * last, the event queues.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3835
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303836static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003837{
Sathya Perla191eb752012-02-23 18:50:13 +00003838 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3839 cancel_delayed_work_sync(&adapter->work);
3840 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3841 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303842}
3843
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003844static void be_cancel_err_detection(struct be_adapter *adapter)
3845{
3846 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3847 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3848 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3849 }
3850}
3851
/* Undo VxLAN offload setup: convert the tunnel IFACE back to normal,
 * clear the FW's VxLAN port, and strip the tunnel-offload bits from the
 * netdev feature flags.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
3870
/* Compute the per-VF resource template (@vft_res) used to split the PF
 * pool resources between @num_vfs VFs and the PF.  Only the fields the
 * FW reports as modifiable are divided up; queue counts are capped by
 * the VF EQ limit and the per-port RSS-table limit.
 */
static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
				struct be_resources *vft_res)
{
	struct be_resources res = adapter->pool_res;
	u32 vf_if_cap_flags = res.vf_if_cap_flags;
	struct be_resources res_mod = {0};
	u16 num_vf_qs = 1;

	/* Distribute the queue resources among the PF and it's VFs */
	if (num_vfs) {
		/* Divide the rx queues evenly among the VFs and the PF, capped
		 * at VF-EQ-count. Any remainder queues belong to the PF.
		 */
		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
				res.max_rss_qs / (num_vfs + 1));

		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
		 * RSS Tables per port. Provide RSS on VFs, only if number of
		 * VFs requested is less than it's PF Pool's RSS Tables limit.
		 */
		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
			num_vf_qs = 1;
	}

	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
	 * which are modifiable using SET_PROFILE_CONFIG cmd.
	 */
	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_MODIFIABLE, 0);

	/* If RSS IFACE capability flags are modifiable for a VF, set the
	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
	 * more than 1 RSSQ is available for a VF.
	 * Otherwise, provision only 1 queue pair for VF.
	 */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		if (num_vf_qs > 1) {
			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
		} else {
			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
					     BE_IF_FLAGS_DEFQ_RSS);
		}
	} else {
		num_vf_qs = 1;
	}

	/* Never allow VFs to request VLAN-promiscuous capability */
	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
	}

	vft_res->vf_if_cap_flags = vf_if_cap_flags;
	vft_res->max_rx_qs = num_vf_qs;
	vft_res->max_rss_qs = num_vf_qs;
	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);

	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
	 * among the PF and it's VFs, if the fields are changeable
	 */
	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);

	if (res_mod.max_vlans == FIELD_MODIFIABLE)
		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);

	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);

	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
3946
/* Full teardown of the adapter: stop the worker, clear SR-IOV, re-spread
 * the SR-IOV resource pool across the max supported VFs (Skyhawk PF
 * only, and only when no VFs are currently assigned), disable VxLAN
 * offloads, destroy the IFACE and all queues, and release MSI-X.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct be_resources vft_res = {0};

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		be_calculate_vf_res(adapter,
				    pci_sriov_get_totalvfs(pdev),
				    &vft_res);
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					&vft_res);
	}

	be_disable_vxlan_offloads(adapter);
	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;	/* guard against double free */

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3982
/* Create an IFACE for every VF via proxied FW commands from the PF.
 * Capability flags come from the FW profile when one exists; VLAN
 * promiscuous capability is always masked off for VFs.
 * Returns 0, or the first FW-command error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res, NULL,
							   ACTIVE_PROFILE_TYPE,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
4018
Sathya Perla39f1d942012-05-08 19:41:24 +00004019static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00004020{
Sathya Perla11ac75e2011-12-13 00:58:50 +00004021 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00004022 int vf;
4023
Sathya Perla39f1d942012-05-08 19:41:24 +00004024 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4025 GFP_KERNEL);
4026 if (!adapter->vf_cfg)
4027 return -ENOMEM;
4028
Sathya Perla11ac75e2011-12-13 00:58:50 +00004029 for_all_vfs(adapter, vf_cfg, vf) {
4030 vf_cfg->if_handle = -1;
4031 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004032 }
Sathya Perla39f1d942012-05-08 19:41:24 +00004033 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00004034}
4035
/* Provision all VFs: create (or re-discover) their interfaces and MACs,
 * grant filter-management privilege, configure QoS/link-state/spoof-check
 * and finally enable SR-IOV in PCI config space (when not already enabled
 * by a previous driver load). On any failure all VF state is torn down
 * via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	/* Non-zero when VFs survived a previous driver unload */
	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: query their if_handles and MACs
		 * instead of creating them anew
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow each VF to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the current spoof-check setting for this VF */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	if (BE3_chip(adapter)) {
		/* On BE3, enable VEB only when SRIOV is enabled */
		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
4128
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304129/* Converting function_mode bits on BE3 to SH mc_type enums */
4130
4131static u8 be_convert_mc_type(u32 function_mode)
4132{
Suresh Reddy66064db2014-06-23 16:41:29 +05304133 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304134 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304135 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304136 return FLEX10;
4137 else if (function_mode & VNIC_MODE)
4138 return vNIC2;
4139 else if (function_mode & UMC_ENABLED)
4140 return UMC;
4141 else
4142 return MC_NONE;
4143}
4144
/* On BE2/BE3 FW does not suggest the supported limits, so compute them
 * here from chip type, multi-channel mode and function capabilities.
 * Fills in *res; also updates adapter->mc_type as a side effect.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs get a larger unicast MAC filter budget than VFs */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
					  0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for a native, non-SRIOV, RSS-capable PF;
	 * otherwise max_rss_qs keeps whatever value *res was
	 * initialized with by the caller
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BE2/BE3 FW has no default-RSS-queue support */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4216
Sathya Perla30128032011-11-10 19:17:57 +00004217static void be_setup_init(struct be_adapter *adapter)
4218{
4219 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004220 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004221 adapter->if_handle = -1;
4222 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004223 adapter->if_flags = 0;
Ajit Khaparde51d1f982016-02-10 22:45:54 +05304224 adapter->phy_state = BE_UNKNOWN_PHY_STATE;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004225 if (be_physfn(adapter))
4226 adapter->cmd_privileges = MAX_PRIVILEGES;
4227 else
4228 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004229}
4230
/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
 * However, this HW limitation is not exposed to the host via any SLI cmd.
 * As a result, in the case of SRIOV and in particular multi-partition configs
 * the driver needs to calculate a proportional share of RSS Tables per PF-pool
 * for distribution between the VFs. This self-imposed limit will determine the
 * number of VFs for which RSS can be enabled.
 */
void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
{
	struct be_port_resources port_res = {0};
	u8 rss_tables_on_port;
	u16 max_vfs = be_max_vfs(adapter);

	/* Query port-wide limits from the saved profile */
	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Tables left over after each NIC PF on the port takes one */
	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;

	/* Each PF Pool's RSS Tables limit =
	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
	 */
	adapter->pool_res.max_rss_tables =
		max_vfs * rss_tables_on_port / port_res.max_vfs;
}
4255
/* Discover the SR-IOV related limits (PF-pool resources, TotalVFs) and
 * store them in adapter->pool_res / adapter->num_vfs. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
				  RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	/* On Skyhawk, derive the per-PF-pool RSS table share up front */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		be_calculate_pf_pool_rss_tables(adapter);
		dev_info(&adapter->pdev->dev,
			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
			 be_max_pf_pool_rss_tables(adapter));
	}
	return 0;
}
4294
/* Read the SR-IOV config and, on Skyhawk, ask FW to (re)distribute the
 * PF-pool resources between the PF and its (not-yet-enabled) VFs.
 * Failures are logged but not propagated - setup continues regardless.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	struct be_resources vft_res = {0};
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the real TotalVFs via sysfs when none are active yet */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		/* 0 VFs enabled: compute the VF-template with no VFs */
		be_calculate_vf_res(adapter, 0, &vft_res);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 &vft_res);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4320
/* Determine this function's resource limits (queues, MACs, vlans) and
 * derive the initial RX/TX IRQ configuration from them.
 * Returns 0 on success or the FW cmd error status.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
	} else {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;
	}

	/* If RoCE is supported stash away half the EQs for RoCE */
	res.max_nic_evt_qs = be_roce_supported(adapter) ?
			     res.max_evt_qs / 2 : res.max_evt_qs;
	adapter->res = res;

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_nic_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Ensure RX and TX queues are created in pairs at init time */
	adapter->cfg_num_rx_irqs =
				min_t(u16, netif_get_num_default_rss_queues(),
				      be_max_qp_irqs(adapter));
	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
	return 0;
}
4370
/* Query static configuration from FW: controller attributes, fw-config,
 * FAT dump size, log level (BEx), WoL capability and active profile.
 * Returns 0 on success or the first fatal cmd error status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* FAT dump length is only available on non-Lancer PFs */
	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	/* Sync PCI wake settings with the FW-reported WoL setting */
	be_cmd_get_acpi_wol_cap(adapter);
	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	return 0;
}
4408
Sathya Perla95046b92013-07-23 15:25:02 +05304409static int be_mac_setup(struct be_adapter *adapter)
4410{
4411 u8 mac[ETH_ALEN];
4412 int status;
4413
4414 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4415 status = be_cmd_get_perm_mac(adapter, mac);
4416 if (status)
4417 return status;
4418
4419 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4420 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304421 }
4422
Sathya Perla95046b92013-07-23 15:25:02 +05304423 return 0;
4424}
4425
/* Kick off the periodic (1s) housekeeping worker and record that it is
 * scheduled, so be_cancel_worker() knows there is something to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4431
/* Schedule the error-detection worker to run after @delay milliseconds
 * and flag it as scheduled so it can be cancelled later.
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4438
/* Create all HW queues in the required order (EQs first, then TXQs,
 * RX CQs and MCC queues) and publish the real queue counts to the stack.
 * Returns 0 on success; on failure logs once and returns the error
 * (caller is responsible for cleanup).
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* Callers must hold rtnl_lock for these two (see be_setup()) */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4473
Ajit Khaparde62219062016-02-10 22:45:53 +05304474static int be_if_create(struct be_adapter *adapter)
4475{
4476 u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4477 u32 cap_flags = be_if_cap_flags(adapter);
4478 int status;
4479
Sathya Perlae2617682016-06-22 08:54:54 -04004480 if (adapter->cfg_num_rx_irqs == 1)
Ajit Khaparde62219062016-02-10 22:45:53 +05304481 cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4482
4483 en_flags &= cap_flags;
4484 /* will enable all the needed filter flags in be_open() */
4485 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4486 &adapter->if_handle, 0);
4487
4488 return status;
4489}
4490
/* Tear down and re-create the interface and all queues (used when the
 * queue configuration changes, e.g. via ethtool). Re-opens the netdev
 * if it was running. Returns 0 on success or the first error status.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	/* The interface must be destroyed before it can be re-created with
	 * (possibly) different RSS flags in be_if_create() below
	 */
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4533
/* Parse the leading major number out of a "major.minor..." FW version
 * string. Returns 0 when the string does not start with a decimal number.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4544
Sathya Perlaf962f842015-02-23 04:20:16 -05004545/* If any VFs are already enabled don't FLR the PF */
4546static bool be_reset_required(struct be_adapter *adapter)
4547{
4548 return pci_num_vf(adapter->pdev) ? false : true;
4549}
4550
4551/* Wait for the FW to be ready and perform the required initialization */
4552static int be_func_init(struct be_adapter *adapter)
4553{
4554 int status;
4555
4556 status = be_fw_wait_ready(adapter);
4557 if (status)
4558 return status;
4559
4560 if (be_reset_required(adapter)) {
4561 status = be_cmd_reset_function(adapter);
4562 if (status)
4563 return status;
4564
4565 /* Wait for interrupts to quiesce after an FLR */
4566 msleep(100);
4567
4568 /* We can clear all errors when function reset succeeds */
Venkata Duvvuru954f6822015-05-13 13:00:13 +05304569 be_clear_error(adapter, BE_CLEAR_ALL);
Sathya Perlaf962f842015-02-23 04:20:16 -05004570 }
4571
4572 /* Tell FW we're ready to fire cmds */
4573 status = be_cmd_fw_init(adapter);
4574 if (status)
4575 return status;
4576
4577 /* Allow interrupts for other ULPs running on NIC function */
4578 be_intr_set(adapter, true);
4579
4580 return 0;
4581}
4582
/* Main bring-up path for the function: initializes FW, queries config and
 * resource limits, creates the interface and queues, programs the MAC,
 * flow control, hsw mode and (optionally) VFs, then starts the worker.
 * On failure everything created so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	status = be_get_config(adapter);
	if (status)
		goto err;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_resources(adapter);
	if (status)
		goto err;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 FW older than 4.0 has known IRQ problems - warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If the requested flow-control setting is rejected, fall back to
	 * reading back whatever the FW actually applied
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
	 * confusing a linux bridge or OVS that it might be connected to.
	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
	 * when SRIOV is not enabled.
	 */
	if (BE3_chip(adapter))
		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
				      PORT_FWD_TYPE_PASSTHRU, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4689
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: emulate an interrupt on every event queue by re-arming
 * the EQ and scheduling its NAPI context, so RX/TX completions are
 * processed even with interrupts disabled (e.g. netconsole).
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4703
/* Flash the firmware image named @fw_file onto the adapter. Only allowed
 * while the interface is up. Returns 0 on success, -ENETDOWN when the
 * interface is down, or the request/flash error status.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	/* Lancer uses a different flash protocol than BEx/Skyhawk */
	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* Refresh the cached FW version after a successful flash */
	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	/* NOTE(review): on request_firmware() failure fw is presumably left
	 * NULL and release_firmware(NULL) is a no-op - confirm
	 */
	release_firmware(fw);
	return status;
}
4733
/* ndo_bridge_setlink hook: switch the embedded bridge between VEB and
 * VEPA forwarding modes based on the IFLA_BRIDGE_MODE netlink attribute.
 * Only supported when SR-IOV is enabled; VEPA is rejected on BE3.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		/* BE3 HW cannot echo packets back on the same port (VEPA) */
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
err:
	/* NOTE(review): when no IFLA_BRIDGE_MODE attribute is present, the
	 * loop falls through here with status == 0, logging a failure but
	 * returning success - looks intentional upstream, confirm
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4783
4784static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004785 struct net_device *dev, u32 filter_mask,
4786 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004787{
4788 struct be_adapter *adapter = netdev_priv(dev);
4789 int status = 0;
4790 u8 hsw_mode;
4791
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004792 /* BE and Lancer chips support VEB mode only */
4793 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Ivan Vecera84317062016-02-11 12:42:26 +01004794 /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
4795 if (!pci_sriov_get_totalvfs(adapter->pdev))
4796 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004797 hsw_mode = PORT_FWD_TYPE_VEB;
4798 } else {
4799 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004800 adapter->if_handle, &hsw_mode,
4801 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004802 if (status)
4803 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04004804
4805 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4806 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004807 }
4808
4809 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4810 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004811 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004812 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004813}
4814
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004815/* VxLAN offload Notes:
4816 *
4817 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4818 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4819 * is expected to work across all types of IP tunnels once exported. Skyhawk
4820 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304821 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4822 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4823 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004824 *
4825 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4826 * adds more than one port, disable offloads and don't re-enable them again
4827 * until after all the tunnels are removed.
4828 */
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07004829static void be_add_vxlan_port(struct net_device *netdev,
4830 struct udp_tunnel_info *ti)
Sathya Perlac9c47142014-03-27 10:46:19 +05304831{
4832 struct be_adapter *adapter = netdev_priv(netdev);
4833 struct device *dev = &adapter->pdev->dev;
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07004834 __be16 port = ti->port;
Sathya Perlac9c47142014-03-27 10:46:19 +05304835 int status;
4836
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07004837 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4838 return;
4839
Ivan Veceraaf19e682015-08-14 22:30:01 +02004840 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
Sathya Perlac9c47142014-03-27 10:46:19 +05304841 return;
4842
Jiri Benc1e5b3112015-09-17 16:11:13 +02004843 if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
4844 adapter->vxlan_port_aliases++;
4845 return;
4846 }
4847
Sathya Perlac9c47142014-03-27 10:46:19 +05304848 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304849 dev_info(dev,
4850 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004851 dev_info(dev, "Disabling VxLAN offloads\n");
4852 adapter->vxlan_port_count++;
4853 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304854 }
4855
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004856 if (adapter->vxlan_port_count++ >= 1)
4857 return;
4858
Sathya Perlac9c47142014-03-27 10:46:19 +05304859 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4860 OP_CONVERT_NORMAL_TO_TUNNEL);
4861 if (status) {
4862 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4863 goto err;
4864 }
4865
4866 status = be_cmd_set_vxlan_port(adapter, port);
4867 if (status) {
4868 dev_warn(dev, "Failed to add VxLAN port\n");
4869 goto err;
4870 }
4871 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4872 adapter->vxlan_port = port;
4873
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004874 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4875 NETIF_F_TSO | NETIF_F_TSO6 |
4876 NETIF_F_GSO_UDP_TUNNEL;
4877 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304878 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004879
Sathya Perlac9c47142014-03-27 10:46:19 +05304880 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4881 be16_to_cpu(port));
4882 return;
4883err:
4884 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304885}
4886
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07004887static void be_del_vxlan_port(struct net_device *netdev,
4888 struct udp_tunnel_info *ti)
Sathya Perlac9c47142014-03-27 10:46:19 +05304889{
4890 struct be_adapter *adapter = netdev_priv(netdev);
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07004891 __be16 port = ti->port;
4892
4893 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
4894 return;
Sathya Perlac9c47142014-03-27 10:46:19 +05304895
Ivan Veceraaf19e682015-08-14 22:30:01 +02004896 if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
Sathya Perlac9c47142014-03-27 10:46:19 +05304897 return;
4898
4899 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004900 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304901
Jiri Benc1e5b3112015-09-17 16:11:13 +02004902 if (adapter->vxlan_port_aliases) {
4903 adapter->vxlan_port_aliases--;
4904 return;
4905 }
4906
Sathya Perlac9c47142014-03-27 10:46:19 +05304907 be_disable_vxlan_offloads(adapter);
4908
4909 dev_info(&adapter->pdev->dev,
4910 "Disabled VxLAN offloads for UDP port %d\n",
4911 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004912done:
4913 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304914}
Joe Stringer725d5482014-11-13 16:38:13 -08004915
Jesse Gross5f352272014-12-23 22:37:26 -08004916static netdev_features_t be_features_check(struct sk_buff *skb,
4917 struct net_device *dev,
4918 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004919{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304920 struct be_adapter *adapter = netdev_priv(dev);
4921 u8 l4_hdr = 0;
4922
4923 /* The code below restricts offload features for some tunneled packets.
4924 * Offload features for normal (non tunnel) packets are unchanged.
4925 */
4926 if (!skb->encapsulation ||
4927 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4928 return features;
4929
4930 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4931 * should disable tunnel offload features if it's not a VxLAN packet,
4932 * as tunnel offloads have been enabled only for VxLAN. This is done to
4933 * allow other tunneled traffic like GRE work fine while VxLAN
4934 * offloads are configured in Skyhawk-R.
4935 */
4936 switch (vlan_get_protocol(skb)) {
4937 case htons(ETH_P_IP):
4938 l4_hdr = ip_hdr(skb)->protocol;
4939 break;
4940 case htons(ETH_P_IPV6):
4941 l4_hdr = ipv6_hdr(skb)->nexthdr;
4942 break;
4943 default:
4944 return features;
4945 }
4946
4947 if (l4_hdr != IPPROTO_UDP ||
4948 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4949 skb->inner_protocol != htons(ETH_P_TEB) ||
4950 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4951 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
Tom Herberta1882222015-12-14 11:19:43 -08004952 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304953
4954 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08004955}
Sathya Perlac9c47142014-03-27 10:46:19 +05304956
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05304957static int be_get_phys_port_id(struct net_device *dev,
4958 struct netdev_phys_item_id *ppid)
4959{
4960 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4961 struct be_adapter *adapter = netdev_priv(dev);
4962 u8 *id;
4963
4964 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4965 return -ENOSPC;
4966
4967 ppid->id[0] = adapter->hba_port_num + 1;
4968 id = &ppid->id[1];
4969 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4970 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4971 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4972
4973 ppid->id_len = id_len;
4974
4975 return 0;
4976}
4977
stephen hemmingere5686ad2012-01-05 19:10:25 +00004978static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004979 .ndo_open = be_open,
4980 .ndo_stop = be_close,
4981 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004982 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004983 .ndo_set_mac_address = be_mac_addr_set,
4984 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004985 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004986 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004987 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4988 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004989 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004990 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004991 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004992 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304993 .ndo_set_vf_link_state = be_set_vf_link_state,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004994 .ndo_set_vf_spoofchk = be_set_vf_spoofchk,
Ivan Vecera66268732011-12-08 01:31:21 +00004995#ifdef CONFIG_NET_POLL_CONTROLLER
4996 .ndo_poll_controller = be_netpoll,
4997#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004998 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4999 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305000#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05305001 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05305002#endif
Alexander Duyckbde6b7c2016-06-16 12:21:43 -07005003 .ndo_udp_tunnel_add = be_add_vxlan_port,
5004 .ndo_udp_tunnel_del = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08005005 .ndo_features_check = be_features_check,
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05305006 .ndo_get_phys_port_id = be_get_phys_port_id,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005007};
5008
/* Initialize netdev feature flags, GSO limits and the ops/ethtool tables.
 * Called once during probe before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RXHASH is advertised only when the interface supports RSS */
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* features must be derived from hw_features, so set it afterwards */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* cap GSO to the HW limit, less the Ethernet header */
	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5035
Kalesh AP87ac1a52015-02-23 04:20:15 -05005036static void be_cleanup(struct be_adapter *adapter)
5037{
5038 struct net_device *netdev = adapter->netdev;
5039
5040 rtnl_lock();
5041 netif_device_detach(netdev);
5042 if (netif_running(netdev))
5043 be_close(netdev);
5044 rtnl_unlock();
5045
5046 be_clear(adapter);
5047}
5048
Kalesh AP484d76f2015-02-23 04:20:14 -05005049static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005050{
Kalesh APd0e1b312015-02-23 04:20:12 -05005051 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005052 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005053
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005054 status = be_setup(adapter);
5055 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05005056 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005057
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005058 rtnl_lock();
5059 if (netif_running(netdev))
Kalesh APd0e1b312015-02-23 04:20:12 -05005060 status = be_open(netdev);
Hannes Frederic Sowa08d99102016-04-18 21:19:42 +02005061 rtnl_unlock();
5062
5063 if (status)
5064 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005065
Kalesh APd0e1b312015-02-23 04:20:12 -05005066 netif_device_attach(netdev);
5067
Kalesh AP484d76f2015-02-23 04:20:14 -05005068 return 0;
5069}
5070
5071static int be_err_recover(struct be_adapter *adapter)
5072{
Kalesh AP484d76f2015-02-23 04:20:14 -05005073 int status;
5074
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305075 /* Error recovery is supported only Lancer as of now */
5076 if (!lancer_chip(adapter))
5077 return -EIO;
5078
5079 /* Wait for adapter to reach quiescent state before
5080 * destroying queues
5081 */
5082 status = be_fw_wait_ready(adapter);
5083 if (status)
5084 goto err;
5085
5086 be_cleanup(adapter);
5087
Kalesh AP484d76f2015-02-23 04:20:14 -05005088 status = be_resume(adapter);
5089 if (status)
5090 goto err;
5091
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005092 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005093err:
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005094 return status;
5095}
5096
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005097static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005098{
5099 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005100 container_of(work, struct be_adapter,
5101 be_err_detection_work.work);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305102 struct device *dev = &adapter->pdev->dev;
5103 int recovery_status;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305104 int delay = ERR_DETECTION_DELAY;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005105
5106 be_detect_error(adapter);
5107
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305108 if (be_check_error(adapter, BE_ERROR_HW))
5109 recovery_status = be_err_recover(adapter);
5110 else
5111 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05005112
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305113 if (!recovery_status) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305114 adapter->recovery_retries = 0;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305115 dev_info(dev, "Adapter recovery successful\n");
5116 goto reschedule_task;
5117 } else if (be_virtfn(adapter)) {
5118 /* For VFs, check if PF have allocated resources
5119 * every second.
5120 */
5121 dev_err(dev, "Re-trying adapter recovery\n");
5122 goto reschedule_task;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305123 } else if (adapter->recovery_retries++ <
5124 MAX_ERR_RECOVERY_RETRY_COUNT) {
5125 /* In case of another error during recovery, it takes 30 sec
5126 * for adapter to come out of error. Retry error recovery after
5127 * this time interval.
5128 */
5129 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5130 delay = ERR_RECOVERY_RETRY_DELAY;
5131 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305132 } else {
5133 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005134 }
5135
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05305136 return;
5137reschedule_task:
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05305138 be_schedule_err_detection(adapter, delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005139}
5140
Vasundhara Volam21252372015-02-06 08:18:42 -05005141static void be_log_sfp_info(struct be_adapter *adapter)
5142{
5143 int status;
5144
5145 status = be_cmd_query_sfp_info(adapter);
5146 if (!status) {
5147 dev_err(&adapter->pdev->dev,
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305148 "Port %c: %s Vendor: %s part no: %s",
5149 adapter->port_name,
5150 be_misconfig_evt_port_state[adapter->phy_state],
5151 adapter->phy.vendor_name,
Vasundhara Volam21252372015-02-06 08:18:42 -05005152 adapter->phy.vendor_pn);
5153 }
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305154 adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
Vasundhara Volam21252372015-02-06 08:18:42 -05005155}
5156
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005157static void be_worker(struct work_struct *work)
5158{
5159 struct be_adapter *adapter =
5160 container_of(work, struct be_adapter, work.work);
5161 struct be_rx_obj *rxo;
5162 int i;
5163
Guilherme G. Piccolid3480612016-07-26 17:39:42 -03005164 if (be_physfn(adapter) &&
5165 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5166 be_cmd_get_die_temperature(adapter);
5167
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005168 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05005169 * mcc completions
5170 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005171 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00005172 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005173 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00005174 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005175 goto reschedule;
5176 }
5177
5178 if (!adapter->stats_cmd_sent) {
5179 if (lancer_chip(adapter))
5180 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05305181 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005182 else
5183 be_cmd_get_stats(adapter, &adapter->stats_cmd);
5184 }
5185
5186 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05305187 /* Replenish RX-queues starved due to memory
5188 * allocation failures.
5189 */
5190 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05305191 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005192 }
5193
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04005194 /* EQ-delay update for Skyhawk is done while notifying EQ */
5195 if (!skyhawk_chip(adapter))
5196 be_eqd_update(adapter, false);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00005197
Ajit Khaparde51d1f982016-02-10 22:45:54 +05305198 if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
Vasundhara Volam21252372015-02-06 08:18:42 -05005199 be_log_sfp_info(adapter);
5200
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005201reschedule:
5202 adapter->work_counter++;
5203 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
5204}
5205
Sathya Perla78fad34e2015-02-23 04:20:08 -05005206static void be_unmap_pci_bars(struct be_adapter *adapter)
5207{
5208 if (adapter->csr)
5209 pci_iounmap(adapter->pdev, adapter->csr);
5210 if (adapter->db)
5211 pci_iounmap(adapter->pdev, adapter->db);
Douglas Millera69bf3c2016-03-04 15:36:56 -06005212 if (adapter->pcicfg && adapter->pcicfg_mapped)
5213 pci_iounmap(adapter->pdev, adapter->pcicfg);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005214}
5215
/* PCI BAR index holding the doorbell registers: BAR 0 on Lancer and on
 * VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5223
5224static int be_roce_map_pci_bars(struct be_adapter *adapter)
5225{
5226 if (skyhawk_chip(adapter)) {
5227 adapter->roce_db.size = 4096;
5228 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5229 db_bar(adapter));
5230 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5231 db_bar(adapter));
5232 }
5233 return 0;
5234}
5235
/* Map the PCI BARs used by the driver (CSR, doorbell, PCICFG and the
 * RoCE doorbell region) and decode the SLI family / virtfn bit from the
 * SLI_INTF config register.  On failure, any mappings already made are
 * released.  Returns 0 on success or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (BAR 2) is mapped only on BE2/BE3 PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
			adapter->pcicfg_mapped = true;
		} else {
			/* VFs reach PCICFG through the doorbell BAR; no
			 * separate mapping exists to unmap later.
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
			adapter->pcicfg_mapped = false;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5280
5281static void be_drv_cleanup(struct be_adapter *adapter)
5282{
5283 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5284 struct device *dev = &adapter->pdev->dev;
5285
5286 if (mem->va)
5287 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5288
5289 mem = &adapter->rx_filter;
5290 if (mem->va)
5291 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5292
5293 mem = &adapter->stats_cmd;
5294 if (mem->va)
5295 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5296}
5297
/* Allocate and initialize various fields in be_adapter struct */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox used by FW commands can
	 * be aligned to a 16-byte boundary below.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* The stats request layout differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5368
/* PCI remove() callback: tear down the adapter in the reverse order of
 * probe — stop RoCE and interrupts, cancel the error-detection worker,
 * unregister the netdev, release HW resources, then PCI resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* must be cancelled before be_clear() tears down the queues */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5398
Arnd Bergmann9a032592015-05-18 23:06:45 +02005399static ssize_t be_hwmon_show_temp(struct device *dev,
5400 struct device_attribute *dev_attr,
5401 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305402{
5403 struct be_adapter *adapter = dev_get_drvdata(dev);
5404
5405 /* Unit: millidegree Celsius */
5406 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5407 return -EIO;
5408 else
5409 return sprintf(buf, "%u\n",
5410 adapter->hwmon_info.be_on_die_temp * 1000);
5411}
5412
/* hwmon sysfs glue: a single read-only temp1_input attribute backed by
 * be_hwmon_show_temp(); registered via be_hwmon_groups during probe.
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5422
Sathya Perlad3791422012-09-28 04:39:44 +00005423static char *mc_name(struct be_adapter *adapter)
5424{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305425 char *str = ""; /* default */
5426
5427 switch (adapter->mc_type) {
5428 case UMC:
5429 str = "UMC";
5430 break;
5431 case FLEX10:
5432 str = "FLEX10";
5433 break;
5434 case vNIC1:
5435 str = "vNIC-1";
5436 break;
5437 case nPAR:
5438 str = "nPAR";
5439 break;
5440 case UFP:
5441 str = "UFP";
5442 break;
5443 case vNIC2:
5444 str = "vNIC-2";
5445 break;
5446 default:
5447 str = "";
5448 }
5449
5450 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005451}
5452
/* "PF" or "VF" depending on the PCI function type, for log messages */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5457
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005458static inline char *nic_name(struct pci_dev *pdev)
5459{
5460 switch (pdev->device) {
5461 case OC_DEVICE_ID1:
5462 return OC_NAME;
5463 case OC_DEVICE_ID2:
5464 return OC_NAME_BE;
5465 case OC_DEVICE_ID3:
5466 case OC_DEVICE_ID4:
5467 return OC_NAME_LANCER;
5468 case BE_DEVICE_ID2:
5469 return BE3_NAME;
5470 case OC_DEVICE_ID5:
5471 case OC_DEVICE_ID6:
5472 return OC_NAME_SH;
5473 default:
5474 return BE_NAME;
5475 }
5476}
5477
/* PCI probe: bring up one BE2/BE3/Lancer/Skyhawk NIC function.
 * Ordering is significant: PCI enable -> BAR mapping -> driver init ->
 * HW setup -> netdev registration; the error labels unwind in exact
 * reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* Allocate for the maximum queue counts; actual usable counts are
	 * discovered later from FW resources.
	 */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: probe continues even if it cannot be enabled */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Kick off the periodic HW error-detection worker */
	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5573
/* Legacy PM suspend callback: quiesce the NIC and put the PCI device to
 * sleep. Interrupts are disabled and the error-detection worker cancelled
 * before cleanup so no HW access can race the teardown.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	/* Save config space, then power the device down to the target state */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5588
/* Legacy PM resume callback: counterpart of be_suspend(). Re-enables the
 * PCI device, restores saved config space, brings the NIC back up via
 * be_resume() and restarts the error-detection worker.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	/* Must enable the device before touching config space */
	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	return 0;
}
5608
/*
 * An FLR will stop BE from DMAing any data.
 *
 * Shutdown callback: stop all asynchronous work and reset the HW function
 * so the device is quiesced across reboot/kexec.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* NOTE(review): guards against shutdown racing an incomplete or
	 * failed probe, where drvdata may not be valid — confirm.
	 */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop workers before detaching so they cannot touch dead HW */
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* Function-level reset halts all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5629
/* EEH error_detected callback: quiesce the adapter after a PCI channel
 * error. Returns DISCONNECT for permanent failures; otherwise requests a
 * slot reset, which will invoke be_eeh_reset().
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* Tear down only once even if the callback fires repeatedly */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5663
/* EEH slot_reset callback: re-enable the device after the slot reset and
 * wait for FW readiness. Returning RECOVERED lets the PCI core proceed to
 * be_eeh_resume(); DISCONNECT abandons the device.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear stale AER status and our own error flags before recovery */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5689
/* EEH resume callback: restore full NIC operation after a successful slot
 * reset and restart the error-detection worker. Failures are only logged;
 * the PCI error-recovery resume hook returns void.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5710
Vasundhara Volamace40af2015-03-04 00:44:34 -05005711static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5712{
5713 struct be_adapter *adapter = pci_get_drvdata(pdev);
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005714 struct be_resources vft_res = {0};
Vasundhara Volamace40af2015-03-04 00:44:34 -05005715 int status;
5716
5717 if (!num_vfs)
5718 be_vf_clear(adapter);
5719
5720 adapter->num_vfs = num_vfs;
5721
5722 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5723 dev_warn(&pdev->dev,
5724 "Cannot disable VFs while they are assigned\n");
5725 return -EBUSY;
5726 }
5727
5728 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5729 * are equally distributed across the max-number of VFs. The user may
5730 * request only a subset of the max-vfs to be enabled.
5731 * Based on num_vfs, redistribute the resources across num_vfs so that
5732 * each VF will have access to more number of resources.
5733 * This facility is not available in BE3 FW.
5734 * Also, this is done by FW in Lancer chip.
5735 */
5736 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005737 be_calculate_vf_res(adapter, adapter->num_vfs,
5738 &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05005739 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
Suresh Reddyb9263cb2016-06-06 07:22:08 -04005740 adapter->num_vfs, &vft_res);
Vasundhara Volamace40af2015-03-04 00:44:34 -05005741 if (status)
5742 dev_err(&pdev->dev,
5743 "Failed to optimize SR-IOV resources\n");
5744 }
5745
5746 status = be_get_resources(adapter);
5747 if (status)
5748 return be_cmd_status(status);
5749
5750 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5751 rtnl_lock();
5752 status = be_update_queues(adapter);
5753 rtnl_unlock();
5754 if (status)
5755 return be_cmd_status(status);
5756
5757 if (adapter->num_vfs)
5758 status = be_vf_setup(adapter);
5759
5760 if (!status)
5761 return adapter->num_vfs;
5762
5763 return 0;
5764}
5765
/* PCI error-recovery (EEH/AER) callbacks: detect -> slot reset -> resume */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5771
/* PCI driver glue; uses the legacy .suspend/.resume PM callback style */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5783
5784static int __init be_init_module(void)
5785{
Joe Perches8e95a202009-12-03 07:58:21 +00005786 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5787 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005788 printk(KERN_WARNING DRV_NAME
5789 " : Module param rx_frag_size must be 2048/4096/8192."
5790 " Using 2048\n");
5791 rx_frag_size = 2048;
5792 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005793
Vasundhara Volamace40af2015-03-04 00:44:34 -05005794 if (num_vfs > 0) {
5795 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5796 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5797 }
5798
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005799 return pci_register_driver(&be_driver);
5800}
5801module_init(be_init_module);
5802
/* Module unload: unregister the PCI driver (invokes be_remove per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
5807module_exit(be_exit_module);