blob: db81e3d9623fee48618f537d85d0e4a1b2df4a4f [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070029MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000030MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070031MODULE_LICENSE("GPL");
32
Vasundhara Volamace40af2015-03-04 00:44:34 -050033/* num_vfs module param is obsolete.
34 * Use sysfs method to enable/disable VFs.
35 */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000037module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000038MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070039
Sathya Perla11ac75e2011-12-13 00:58:50 +000040static ushort rx_frag_size = 2048;
41module_param(rx_frag_size, ushort, S_IRUGO);
42MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000056/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070057static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000058 "CEV",
59 "CTX",
60 "DBUF",
61 "ERX",
62 "Host",
63 "MPU",
64 "NDMA",
65 "PTC ",
66 "RDMA ",
67 "RXF ",
68 "RXIPS ",
69 "RXULP0 ",
70 "RXULP1 ",
71 "RXULP2 ",
72 "TIM ",
73 "TPOST ",
74 "TPRE ",
75 "TXIPS ",
76 "TXULP0 ",
77 "TXULP1 ",
78 "UC ",
79 "WDMA ",
80 "TXULP2 ",
81 "HOST1 ",
82 "P0_OB_LINK ",
83 "P1_OB_LINK ",
84 "HOST_GPIO ",
85 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053086 "ERX2 ",
87 "SPARE ",
88 "JTAG ",
89 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000090};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
Ajit Khaparde7c185272010-07-29 06:16:33 +000092/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070093static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000094 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "ECRC",
116 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700117 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000125 "Unknown"
126};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
Venkat Duvvuruc1bb0a52016-03-02 06:00:28 -0500128#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
129 BE_IF_FLAGS_BROADCAST | \
130 BE_IF_FLAGS_MULTICAST | \
131 BE_IF_FLAGS_PASS_L3L4_ERRORS)
132
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700133static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
134{
135 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530136
Sathya Perla1cfafab2012-02-23 18:50:15 +0000137 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000138 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
139 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000140 mem->va = NULL;
141 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700142}
143
144static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530145 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700146{
147 struct be_dma_mem *mem = &q->dma_mem;
148
149 memset(q, 0, sizeof(*q));
150 q->len = len;
151 q->entry_size = entry_size;
152 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700153 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
154 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700155 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000156 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700157 return 0;
158}
159
Somnath Kotur68c45a22013-03-14 02:42:07 +0000160static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700161{
Sathya Perladb3ea782011-08-22 19:41:52 +0000162 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163
Sathya Perladb3ea782011-08-22 19:41:52 +0000164 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530165 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000166 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167
Sathya Perla5f0b8492009-07-27 22:52:56 +0000168 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700169 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000170 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700171 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000172 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000174
Sathya Perladb3ea782011-08-22 19:41:52 +0000175 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530176 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700177}
178
Somnath Kotur68c45a22013-03-14 02:42:07 +0000179static void be_intr_set(struct be_adapter *adapter, bool enable)
180{
181 int status = 0;
182
183 /* On lancer interrupts can't be controlled via this register */
184 if (lancer_chip(adapter))
185 return;
186
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530187 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000188 return;
189
190 status = be_cmd_intr_set(adapter, enable);
191 if (status)
192 be_reg_intr_set(adapter, enable);
193}
194
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196{
197 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530198
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530199 if (be_check_error(adapter, BE_ERROR_HW))
200 return;
201
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202 val |= qid & DB_RQ_RING_ID_MASK;
203 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000204
205 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000206 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207}
208
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000209static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
210 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700211{
212 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530213
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530214 if (be_check_error(adapter, BE_ERROR_HW))
215 return;
216
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000217 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700218 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000219
220 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000221 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222}
223
Sathya Perla8788fdc2009-07-27 22:52:03 +0000224static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400225 bool arm, bool clear_int, u16 num_popped,
226 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700227{
228 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530229
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700230 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530231 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000232
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530233 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000234 return;
235
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700236 if (arm)
237 val |= 1 << DB_EQ_REARM_SHIFT;
238 if (clear_int)
239 val |= 1 << DB_EQ_CLR_SHIFT;
240 val |= 1 << DB_EQ_EVNT_SHIFT;
241 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400242 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000243 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244}
245
Sathya Perla8788fdc2009-07-27 22:52:03 +0000246void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700247{
248 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530249
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700250 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000251 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
252 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000253
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530254 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000255 return;
256
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700257 if (arm)
258 val |= 1 << DB_CQ_REARM_SHIFT;
259 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000260 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700261}
262
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700263static int be_mac_addr_set(struct net_device *netdev, void *p)
264{
265 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700267 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530268 int status;
269 u8 mac[ETH_ALEN];
270 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700271
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000272 if (!is_valid_ether_addr(addr->sa_data))
273 return -EADDRNOTAVAIL;
274
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530275 /* Proceed further only if, User provided MAC is different
276 * from active MAC
277 */
278 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
279 return 0;
280
Kalesh APbcc84142015-08-05 03:27:48 -0400281 /* if device is not running, copy MAC to netdev->dev_addr */
282 if (!netif_running(netdev))
283 goto done;
284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
286 * privilege or if PF did not provision the new MAC address.
287 * On BE3, this cmd will always fail if the VF doesn't have the
288 * FILTMGMT privilege. This failure is OK, only if the PF programmed
289 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000290 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530291 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
292 adapter->if_handle, &adapter->pmac_id[0], 0);
293 if (!status) {
294 curr_pmac_id = adapter->pmac_id[0];
295
296 /* Delete the old programmed MAC. This call may fail if the
297 * old MAC was already deleted by the PF driver.
298 */
299 if (adapter->pmac_id[0] != old_pmac_id)
300 be_cmd_pmac_del(adapter, adapter->if_handle,
301 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000302 }
303
Sathya Perla5a712c12013-07-23 15:24:59 +0530304 /* Decide if the new MAC is successfully activated only after
305 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000306 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530307 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
308 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000309 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000310 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700311
Sathya Perla5a712c12013-07-23 15:24:59 +0530312 /* The MAC change did not happen, either due to lack of privilege
313 * or PF didn't pre-provision.
314 */
dingtianhong61d23e92013-12-30 15:40:43 +0800315 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530316 status = -EPERM;
317 goto err;
318 }
Kalesh APbcc84142015-08-05 03:27:48 -0400319done:
320 ether_addr_copy(netdev->dev_addr, addr->sa_data);
321 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000322 return 0;
323err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530324 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700325 return status;
326}
327
Sathya Perlaca34fe32012-11-06 17:48:56 +0000328/* BE2 supports only v0 cmd */
329static void *hw_stats_from_cmd(struct be_adapter *adapter)
330{
331 if (BE2_chip(adapter)) {
332 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
333
334 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500335 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000336 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
337
338 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500339 } else {
340 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
341
342 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000343 }
344}
345
346/* BE2 supports only v0 cmd */
347static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
348{
349 if (BE2_chip(adapter)) {
350 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
351
352 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500353 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000354 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
355
356 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500357 } else {
358 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
359
360 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000361 }
362}
363
364static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000365{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000366 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
367 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
368 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000370 &rxf_stats->port[adapter->port_num];
371 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000372
Sathya Perlaac124ff2011-07-25 19:10:14 +0000373 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000374 drvs->rx_pause_frames = port_stats->rx_pause_frames;
375 drvs->rx_crc_errors = port_stats->rx_crc_errors;
376 drvs->rx_control_frames = port_stats->rx_control_frames;
377 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
378 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
379 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
380 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
381 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
382 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
383 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
384 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
385 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
386 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
387 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000388 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_dropped_header_too_small =
390 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000391 drvs->rx_address_filtered =
392 port_stats->rx_address_filtered +
393 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000394 drvs->rx_alignment_symbol_errors =
395 port_stats->rx_alignment_symbol_errors;
396
397 drvs->tx_pauseframes = port_stats->tx_pauseframes;
398 drvs->tx_controlframes = port_stats->tx_controlframes;
399
400 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000401 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000404 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->forwarded_packets = rxf_stats->forwarded_packets;
407 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000408 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
409 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000410 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
411}
412
Sathya Perlaca34fe32012-11-06 17:48:56 +0000413static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000414{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000415 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
416 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
417 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000418 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000419 &rxf_stats->port[adapter->port_num];
420 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421
Sathya Perlaac124ff2011-07-25 19:10:14 +0000422 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000423 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
424 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000425 drvs->rx_pause_frames = port_stats->rx_pause_frames;
426 drvs->rx_crc_errors = port_stats->rx_crc_errors;
427 drvs->rx_control_frames = port_stats->rx_control_frames;
428 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
429 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
430 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
431 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
432 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
433 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
434 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
435 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
436 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
437 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
438 drvs->rx_dropped_header_too_small =
439 port_stats->rx_dropped_header_too_small;
440 drvs->rx_input_fifo_overflow_drop =
441 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000442 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000443 drvs->rx_alignment_symbol_errors =
444 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000445 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 drvs->tx_pauseframes = port_stats->tx_pauseframes;
447 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000448 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000449 drvs->jabber_events = port_stats->jabber_events;
450 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000451 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000452 drvs->forwarded_packets = rxf_stats->forwarded_packets;
453 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000454 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
455 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000456 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
457}
458
Ajit Khaparde61000862013-10-03 16:16:33 -0500459static void populate_be_v2_stats(struct be_adapter *adapter)
460{
461 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
462 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
463 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
464 struct be_port_rxf_stats_v2 *port_stats =
465 &rxf_stats->port[adapter->port_num];
466 struct be_drv_stats *drvs = &adapter->drv_stats;
467
468 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
469 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
470 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
471 drvs->rx_pause_frames = port_stats->rx_pause_frames;
472 drvs->rx_crc_errors = port_stats->rx_crc_errors;
473 drvs->rx_control_frames = port_stats->rx_control_frames;
474 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
475 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
476 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
477 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
478 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
479 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
480 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
481 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
482 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
483 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
484 drvs->rx_dropped_header_too_small =
485 port_stats->rx_dropped_header_too_small;
486 drvs->rx_input_fifo_overflow_drop =
487 port_stats->rx_input_fifo_overflow_drop;
488 drvs->rx_address_filtered = port_stats->rx_address_filtered;
489 drvs->rx_alignment_symbol_errors =
490 port_stats->rx_alignment_symbol_errors;
491 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
492 drvs->tx_pauseframes = port_stats->tx_pauseframes;
493 drvs->tx_controlframes = port_stats->tx_controlframes;
494 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
495 drvs->jabber_events = port_stats->jabber_events;
496 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
497 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
498 drvs->forwarded_packets = rxf_stats->forwarded_packets;
499 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
500 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
501 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
502 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530503 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500504 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
505 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
506 drvs->rx_roce_frames = port_stats->roce_frames_received;
507 drvs->roce_drops_crc = port_stats->roce_drops_crc;
508 drvs->roce_drops_payload_len =
509 port_stats->roce_drops_payload_len;
510 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500511}
512
Selvin Xavier005d5692011-05-16 07:36:35 +0000513static void populate_lancer_stats(struct be_adapter *adapter)
514{
Selvin Xavier005d5692011-05-16 07:36:35 +0000515 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530516 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000517
518 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
519 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
520 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
521 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000522 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000523 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
525 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
526 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
527 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
528 drvs->rx_dropped_tcp_length =
529 pport_stats->rx_dropped_invalid_tcp_length;
530 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
531 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
532 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
533 drvs->rx_dropped_header_too_small =
534 pport_stats->rx_dropped_header_too_small;
535 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000536 drvs->rx_address_filtered =
537 pport_stats->rx_address_filtered +
538 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000539 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000540 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000541 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
542 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000543 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000544 drvs->forwarded_packets = pport_stats->num_forwards_lo;
545 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000546 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000547 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000548}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000549
Sathya Perla09c1c682011-08-22 19:41:53 +0000550static void accumulate_16bit_val(u32 *acc, u16 val)
551{
552#define lo(x) (x & 0xFFFF)
553#define hi(x) (x & 0xFFFF0000)
554 bool wrapped = val < lo(*acc);
555 u32 newacc = hi(*acc) + val;
556
557 if (wrapped)
558 newacc += 65536;
559 ACCESS_ONCE(*acc) = newacc;
560}
561
Jingoo Han4188e7d2013-08-05 18:02:02 +0900562static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530563 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000564{
565 if (!BEx_chip(adapter))
566 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
567 else
568 /* below erx HW counter can actually wrap around after
569 * 65535. Driver accumulates a 32-bit value
570 */
571 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
572 (u16)erx_stat);
573}
574
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000575void be_parse_stats(struct be_adapter *adapter)
576{
Ajit Khaparde61000862013-10-03 16:16:33 -0500577 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000578 struct be_rx_obj *rxo;
579 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000580 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000581
Sathya Perlaca34fe32012-11-06 17:48:56 +0000582 if (lancer_chip(adapter)) {
583 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000584 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000585 if (BE2_chip(adapter))
586 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500587 else if (BE3_chip(adapter))
588 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000589 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500590 else
591 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000592
Ajit Khaparde61000862013-10-03 16:16:33 -0500593 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000594 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000595 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
596 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000597 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000598 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000599}
600
/* ndo_get_stats64 handler: aggregates per-RX/TX-queue software counters and
 * the driver-maintained HW error counters (drv_stats) into @stats.
 * Per-queue u64 counters are read under their u64_stats seqcount so that a
 * 64-bit value is never torn on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	/* Sum RX packet/byte counts across all RX queues */
	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry loop: re-read if the writer updated concurrently */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	/* Sum TX packet/byte counts across all TX queues */
	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
668
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000669void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700670{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 struct net_device *netdev = adapter->netdev;
672
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000673 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000674 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000675 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000677
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530678 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000679 netif_carrier_on(netdev);
680 else
681 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200682
683 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684}
685
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500686static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700687{
Sathya Perla3c8def92011-06-12 20:01:58 +0000688 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530689 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000690
Sathya Perlaab1594e2011-07-25 19:10:15 +0000691 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000692 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500693 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530694 stats->tx_pkts += tx_pkts;
695 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
696 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000697 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698}
699
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500700/* Returns number of WRBs needed for the skb */
701static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700702{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500703 /* +1 for the header wrb */
704 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705}
706
707static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
708{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500709 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
710 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
711 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
712 wrb->rsvd0 = 0;
713}
714
715/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
716 * to avoid the swap and shift/mask operations in wrb_fill().
717 */
718static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
719{
720 wrb->frag_pa_hi = 0;
721 wrb->frag_pa_lo = 0;
722 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000723 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700724}
725
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000726static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530727 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000728{
729 u8 vlan_prio;
730 u16 vlan_tag;
731
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100732 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000733 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
734 /* If vlan priority provided by OS is NOT in available bmap */
735 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
736 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500737 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000738
739 return vlan_tag;
740}
741
Sathya Perlac9c47142014-03-27 10:46:19 +0530742/* Used only for IP tunnel packets */
743static u16 skb_inner_ip_proto(struct sk_buff *skb)
744{
745 return (inner_ip_hdr(skb)->version == 4) ?
746 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
747}
748
749static u16 skb_ip_proto(struct sk_buff *skb)
750{
751 return (ip_hdr(skb)->version == 4) ?
752 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
753}
754
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530755static inline bool be_is_txq_full(struct be_tx_obj *txo)
756{
757 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
758}
759
760static inline bool be_can_txq_wake(struct be_tx_obj *txo)
761{
762 return atomic_read(&txo->q.used) < txo->q.len / 2;
763}
764
765static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
766{
767 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
768}
769
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530770static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
771 struct sk_buff *skb,
772 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700773{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530774 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700775
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000776 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530777 BE_WRB_F_SET(wrb_params->features, LSO, 1);
778 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000779 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530780 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700781 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530782 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530783 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530784 proto = skb_inner_ip_proto(skb);
785 } else {
786 proto = skb_ip_proto(skb);
787 }
788 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530789 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530790 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530791 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700792 }
793
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100794 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530795 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
796 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700797 }
798
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530799 BE_WRB_F_SET(wrb_params->features, CRC, 1);
800}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500801
/* Program the TX header WRB from the previously computed wrb_params:
 * checksum/LSO feature bits, VLAN tag, WRB count and total frame length.
 * @hdr is zeroed first; callers convert it to LE afterwards.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes the frame to the BMC (OS2BMC path) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
838
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000839static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530840 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000841{
842 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500843 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000844
Sathya Perla7101e112010-03-22 20:41:12 +0000845
Sathya Perlaf986afc2015-02-06 08:18:43 -0500846 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
847 (u64)le32_to_cpu(wrb->frag_pa_lo);
848 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000849 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500850 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000851 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500852 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000853 }
854}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530856/* Grab a WRB header for xmit */
857static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700858{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530859 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700860
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530861 queue_head_inc(&txo->q);
862 return head;
863}
864
865/* Set up the WRB header for xmit */
866static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
867 struct be_tx_obj *txo,
868 struct be_wrb_params *wrb_params,
869 struct sk_buff *skb, u16 head)
870{
871 u32 num_frags = skb_wrb_cnt(skb);
872 struct be_queue_info *txq = &txo->q;
873 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
874
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530875 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500876 be_dws_cpu_to_le(hdr, sizeof(*hdr));
877
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500878 BUG_ON(txo->sent_skb_list[head]);
879 txo->sent_skb_list[head] = skb;
880 txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530881 atomic_add(num_frags, &txq->used);
882 txo->last_req_wrb_cnt = num_frags;
883 txo->pend_wrb_cnt += num_frags;
884}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700885
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530886/* Setup a WRB fragment (buffer descriptor) for xmit */
887static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
888 int len)
889{
890 struct be_eth_wrb *wrb;
891 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700892
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530893 wrb = queue_head_node(txq);
894 wrb_fill(wrb, busaddr, len);
895 queue_head_inc(txq);
896}
897
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 * @head: producer index of the failed packet's header WRB
 * @map_single: true when the first mapped WRB was the skb's linear area
 * @copied: total bytes successfully DMA-mapped before the failure
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the failed packet's header so its WRBs can be walked */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first fragment can be a single-mapped buffer */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* leave the producer index at the header slot for reuse */
	txq->head = head;
}
925
926/* Enqueue the given packet for transmit. This routine allocates WRBs for the
927 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
928 * of WRBs used up by the packet.
929 */
930static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
931 struct sk_buff *skb,
932 struct be_wrb_params *wrb_params)
933{
934 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
935 struct device *dev = &adapter->pdev->dev;
936 struct be_queue_info *txq = &txo->q;
937 bool map_single = false;
938 u16 head = txq->head;
939 dma_addr_t busaddr;
940 int len;
941
942 head = be_tx_get_wrb_hdr(txo);
943
944 if (skb->len > skb->data_len) {
945 len = skb_headlen(skb);
946
947 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
948 if (dma_mapping_error(dev, busaddr))
949 goto dma_err;
950 map_single = true;
951 be_tx_setup_wrb_frag(txo, busaddr, len);
952 copied += len;
953 }
954
955 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
956 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
957 len = skb_frag_size(frag);
958
959 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
960 if (dma_mapping_error(dev, busaddr))
961 goto dma_err;
962 be_tx_setup_wrb_frag(txo, busaddr, len);
963 copied += len;
964 }
965
966 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
967
968 be_tx_stats_update(txo, skb);
969 return wrb_cnt;
970
971dma_err:
972 adapter->drv_stats.dma_map_errors++;
973 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000974 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700975}
976
/* Nonzero once the QnQ async event has been received from the firmware */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
981
Somnath Kotur93040ae2012-06-26 22:32:10 +0000982static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000983 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530984 struct be_wrb_params
985 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000986{
987 u16 vlan_tag = 0;
988
989 skb = skb_share_check(skb, GFP_ATOMIC);
990 if (unlikely(!skb))
991 return skb;
992
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100993 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000994 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530995
996 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
997 if (!vlan_tag)
998 vlan_tag = adapter->pvid;
999 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
1000 * skip VLAN insertion
1001 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301002 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +05301003 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001004
1005 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001006 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1007 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001008 if (unlikely(!skb))
1009 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001010 skb->vlan_tci = 0;
1011 }
1012
1013 /* Insert the outer VLAN, if any */
1014 if (adapter->qnq_vid) {
1015 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001016 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1017 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001018 if (unlikely(!skb))
1019 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301020 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001021 }
1022
Somnath Kotur93040ae2012-06-26 22:32:10 +00001023 return skb;
1024}
1025
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001026static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1027{
1028 struct ethhdr *eh = (struct ethhdr *)skb->data;
1029 u16 offset = ETH_HLEN;
1030
1031 if (eh->h_proto == htons(ETH_P_IPV6)) {
1032 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1033
1034 offset += sizeof(struct ipv6hdr);
1035 if (ip6h->nexthdr != NEXTHDR_TCP &&
1036 ip6h->nexthdr != NEXTHDR_UDP) {
1037 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301038 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001039
1040 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1041 if (ehdr->hdrlen == 0xff)
1042 return true;
1043 }
1044 }
1045 return false;
1046}
1047
/* True when a VLAN tag would end up on the wire for this skb: either the
 * stack supplied one, or the adapter has a pvid/QnQ outer VLAN configured.
 */
static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
1052
/* True when this skb could hit the BE3 TX stall erratum: BE3 silicon plus
 * an IPv6 packet with the offending extension-header layout.
 */
static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
1057
/* Apply the Lancer/BEx TX hardware-bug workarounds to @skb, in order:
 * trim HW-padded short IPv4 frames, skip HW VLAN tagging when a tag is
 * already inline, software-insert VLAN tags where HW tagging is broken,
 * and drop IPv6 packets that would stall the HW.
 * Returns the (possibly reallocated) skb, or NULL if it was dropped or
 * reallocation failed; the skb is consumed in either failure case.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Trim the frame back to the length claimed by the IP header.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1126
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301127static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1128 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301129 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301130{
Suresh Reddy8227e992015-10-12 03:47:19 -04001131 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1132 * packets that are 32b or less may cause a transmit stall
1133 * on that port. The workaround is to pad such packets
1134 * (len <= 32 bytes) to a minimum length of 36b.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301135 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001136 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001137 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301138 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301139 }
1140
1141 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301142 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301143 if (!skb)
1144 return NULL;
1145 }
1146
1147 return skb;
1148}
1149
/* Notify the HW of all pending (not yet doorbelled) WRBs on @txo.
 * Ensures the last request raises an event/completion, and on BEx pads the
 * pending WRB count to an even number with a dummy WRB (HW requirement),
 * patching the header's num_wrb field to match.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* rewrite num_wrb in the (already LE) header to include
		 * the dummy WRB
		 */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1173
/* OS2BMC related */

/* Well-known UDP ports whose traffic may need to be mirrored to the BMC */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1	137
#define NET_BIOS_PORT2	138
#define DHCPV6_RAS_PORT	547

/* Predicates over adapter->bmc_filt_mask deciding whether a given class of
 * packet should be forwarded to the BMC. Note the inverted sense of the
 * *_filt_enabled checks for mc/bc: a set filter bit means the BMC already
 * receives that traffic via its own filter, so the driver must NOT mirror it.
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* NOTE(review): compare_ether_addr() is deprecated in favor of
 * ether_addr_equal() — consider updating; also this macro appears unused
 * within this file section. Verify before removing.
 */
#define is_broadcast_packet(eh, adapter)	\
		(is_multicast_ether_addr(eh->h_dest) && \
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1227
/* Decide whether @*skb must also be delivered to the BMC (OS2BMC), based on
 * the packet class (multicast/broadcast, ARP, ICMPv6 RA/NA, well-known UDP
 * ports) and the adapter's BMC filter mask. If so, any VLAN tag is software-
 * inserted into the packet data (the BMC path needs the tag inline) and
 * true is returned; @*skb may be reallocated in the process.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* only multicast/broadcast frames are candidates for the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* ICMPv6 router/neighbour advertisements, per the filter mask */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS over UDP, per the filter mask */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 * NOTE(review): be_insert_vlan_in_pkt() can return NULL on
	 * allocation failure but the result is not checked here, and the
	 * caller (be_xmit) then passes the NULL skb to be_xmit_enqueue() —
	 * verify and harden this path.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1297
/* ndo_start_xmit handler: applies HW workarounds, computes WRB offload
 * parameters, enqueues the packet (and optionally a second BMC-directed
 * copy with the mgmt bit set), throttles the subqueue when near-full, and
 * rings the doorbell unless xmit_more indicates further packets are coming.
 * Always returns NETDEV_TX_OK; failed packets are dropped, not requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* defer the doorbell when the stack promises more packets */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* extra reference: the skb is now owned twice */
			skb_get(skb);
	}

	/* stop the subqueue if a worst-case packet may no longer fit */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1348
1349static int be_change_mtu(struct net_device *netdev, int new_mtu)
1350{
1351 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301352 struct device *dev = &adapter->pdev->dev;
1353
1354 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1355 dev_info(dev, "MTU must be between %d and %d bytes\n",
1356 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001357 return -EINVAL;
1358 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301359
1360 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301361 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001362 netdev->mtu = new_mtu;
1363 return 0;
1364}
1365
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001366static inline bool be_in_all_promisc(struct be_adapter *adapter)
1367{
1368 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1369 BE_IF_FLAGS_ALL_PROMISCUOUS;
1370}
1371
1372static int be_set_vlan_promisc(struct be_adapter *adapter)
1373{
1374 struct device *dev = &adapter->pdev->dev;
1375 int status;
1376
1377 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1378 return 0;
1379
1380 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1381 if (!status) {
1382 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1383 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1384 } else {
1385 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1386 }
1387 return status;
1388}
1389
1390static int be_clear_vlan_promisc(struct be_adapter *adapter)
1391{
1392 struct device *dev = &adapter->pdev->dev;
1393 int status;
1394
1395 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1396 if (!status) {
1397 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1398 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1399 }
1400 return status;
1401}
1402
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filters programmed fine; leave the promisc mode that was
		 * entered earlier when the table overflowed.
		 */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1438
Patrick McHardy80d5c362013-04-19 02:04:28 +00001439static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001440{
1441 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001442 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001443
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001444 /* Packets with VID 0 are always received by Lancer by default */
1445 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301446 return status;
1447
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301448 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301449 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001450
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301451 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301452 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001453
Somnath Kotura6b74e02014-01-21 15:50:55 +05301454 status = be_vid_config(adapter);
1455 if (status) {
1456 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301457 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301458 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301459
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001460 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001461}
1462
Patrick McHardy80d5c362013-04-19 02:04:28 +00001463static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001464{
1465 struct be_adapter *adapter = netdev_priv(netdev);
1466
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001467 /* Packets with VID 0 are always received by Lancer by default */
1468 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301469 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001470
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301471 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301472 adapter->vlans_added--;
1473
1474 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001475}
1476
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001477static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301478{
Sathya Perlaac34b742015-02-06 08:18:40 -05001479 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001480 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1481}
1482
1483static void be_set_all_promisc(struct be_adapter *adapter)
1484{
1485 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1486 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1487}
1488
1489static void be_set_mc_promisc(struct be_adapter *adapter)
1490{
1491 int status;
1492
1493 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1494 return;
1495
1496 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1497 if (!status)
1498 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1499}
1500
1501static void be_set_mc_list(struct be_adapter *adapter)
1502{
1503 int status;
1504
1505 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1506 if (!status)
1507 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1508 else
1509 be_set_mc_promisc(adapter);
1510}
1511
/* Re-program the HW unicast MAC list from the netdev's UC address list.
 * pmac_id[] slot 0 is reserved for the primary MAC; secondary UC MACs
 * occupy slots 1..uc_macs.  If more UC addresses are configured than the
 * HW supports, fall back to full promiscuous mode instead.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Remove all previously programmed secondary UC MACs */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* uc_macs is pre-incremented so the pmac_id[] index starts at 1,
	 * keeping slot 0 for the primary MAC.
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1532
1533static void be_clear_uc_list(struct be_adapter *adapter)
1534{
1535 int i;
1536
1537 for (i = 1; i < (adapter->uc_macs + 1); i++)
1538 be_cmd_pmac_del(adapter, adapter->if_handle,
1539 adapter->pmac_id[i], 0);
1540 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301541}
1542
Sathya Perlaa54769f2011-10-24 02:45:00 +00001543static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001544{
1545 struct be_adapter *adapter = netdev_priv(netdev);
1546
1547 if (netdev->flags & IFF_PROMISC) {
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001548 be_set_all_promisc(adapter);
1549 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001550 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001551
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001552 /* Interface was previously in promiscuous mode; disable it */
1553 if (be_in_all_promisc(adapter)) {
1554 be_clear_all_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001555 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001556 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001557 }
1558
Sathya Perlae7b909a2009-11-22 22:01:10 +00001559 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001560 if (netdev->flags & IFF_ALLMULTI ||
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001561 netdev_mc_count(netdev) > be_max_mc(adapter)) {
1562 be_set_mc_promisc(adapter);
Kalesh APa0794882014-05-30 19:06:23 +05301563 return;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001564 }
Kalesh APa0794882014-05-30 19:06:23 +05301565
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001566 if (netdev_uc_count(netdev) != adapter->uc_macs)
1567 be_set_uc_list(adapter);
1568
1569 be_set_mc_list(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570}
1571
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001572static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1573{
1574 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001575 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001576 int status;
1577
Sathya Perla11ac75e2011-12-13 00:58:50 +00001578 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001579 return -EPERM;
1580
Sathya Perla11ac75e2011-12-13 00:58:50 +00001581 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001582 return -EINVAL;
1583
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301584 /* Proceed further only if user provided MAC is different
1585 * from active MAC
1586 */
1587 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1588 return 0;
1589
Sathya Perla3175d8c2013-07-23 15:25:03 +05301590 if (BEx_chip(adapter)) {
1591 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1592 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001593
Sathya Perla11ac75e2011-12-13 00:58:50 +00001594 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1595 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301596 } else {
1597 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1598 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001599 }
1600
Kalesh APabccf232014-07-17 16:20:24 +05301601 if (status) {
1602 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1603 mac, vf, status);
1604 return be_cmd_status(status);
1605 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001606
Kalesh APabccf232014-07-17 16:20:24 +05301607 ether_addr_copy(vf_cfg->mac_addr, mac);
1608
1609 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001610}
1611
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001612static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301613 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001614{
1615 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001616 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001617
Sathya Perla11ac75e2011-12-13 00:58:50 +00001618 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001619 return -EPERM;
1620
Sathya Perla11ac75e2011-12-13 00:58:50 +00001621 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001622 return -EINVAL;
1623
1624 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001625 vi->max_tx_rate = vf_cfg->tx_rate;
1626 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001627 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1628 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001629 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301630 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001631 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001632
1633 return 0;
1634}
1635
/* Enable Transparent VLAN Tagging (TVT) with @vlan on a VF's interface,
 * clear any VLAN filters the VF had programmed, and revoke the VF's
 * FILTMGMT privilege so it cannot program new VLAN filters while TVT
 * is active.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	/* Failures of the filter-clear/privilege steps are not fatal */
	return 0;
}
1664
/* Disable Transparent VLAN Tagging on a VF and restore the VF's ability
 * to program its own VLAN filters (FILTMGMT privilege).
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1691
Sathya Perla748b5392014-05-09 13:29:13 +05301692static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001693{
1694 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001695 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001696 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001697
Sathya Perla11ac75e2011-12-13 00:58:50 +00001698 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001699 return -EPERM;
1700
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001701 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001702 return -EINVAL;
1703
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001704 if (vlan || qos) {
1705 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001706 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001707 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001708 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001709 }
1710
Kalesh APabccf232014-07-17 16:20:24 +05301711 if (status) {
1712 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001713 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1714 status);
Kalesh APabccf232014-07-17 16:20:24 +05301715 return be_cmd_status(status);
1716 }
1717
1718 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301719 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001720}
1721
/* ndo_set_vf_rate: set the max TX rate (Mbps) for a VF.
 * min_tx_rate is not supported.  max_tx_rate of 0 removes the limit.
 * A non-zero rate must be 100..link_speed, and on Skyhawk a multiple
 * of 1% of the link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* HW cannot guarantee a minimum rate */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 = no limit; skip link-speed based validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301783
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301784static int be_set_vf_link_state(struct net_device *netdev, int vf,
1785 int link_state)
1786{
1787 struct be_adapter *adapter = netdev_priv(netdev);
1788 int status;
1789
1790 if (!sriov_enabled(adapter))
1791 return -EPERM;
1792
1793 if (vf >= adapter->num_vfs)
1794 return -EINVAL;
1795
1796 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301797 if (status) {
1798 dev_err(&adapter->pdev->dev,
1799 "Link state change on VF %d failed: %#x\n", vf, status);
1800 return be_cmd_status(status);
1801 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301802
Kalesh APabccf232014-07-17 16:20:24 +05301803 adapter->vf_cfg[vf].plink_tracking = link_state;
1804
1805 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301806}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001807
Kalesh APe7bcbd72015-05-06 05:30:32 -04001808static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1809{
1810 struct be_adapter *adapter = netdev_priv(netdev);
1811 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1812 u8 spoofchk;
1813 int status;
1814
1815 if (!sriov_enabled(adapter))
1816 return -EPERM;
1817
1818 if (vf >= adapter->num_vfs)
1819 return -EINVAL;
1820
1821 if (BEx_chip(adapter))
1822 return -EOPNOTSUPP;
1823
1824 if (enable == vf_cfg->spoofchk)
1825 return 0;
1826
1827 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1828
1829 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1830 0, spoofchk);
1831 if (status) {
1832 dev_err(&adapter->pdev->dev,
1833 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1834 return be_cmd_status(status);
1835 }
1836
1837 vf_cfg->spoofchk = enable;
1838 return 0;
1839}
1840
Sathya Perla2632baf2013-10-01 16:00:00 +05301841static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1842 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001843{
Sathya Perla2632baf2013-10-01 16:00:00 +05301844 aic->rx_pkts_prev = rx_pkts;
1845 aic->tx_reqs_prev = tx_pkts;
1846 aic->jiffies = now;
1847}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001848
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001849static int be_get_new_eqd(struct be_eq_obj *eqo)
Sathya Perla2632baf2013-10-01 16:00:00 +05301850{
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001851 struct be_adapter *adapter = eqo->adapter;
1852 int eqd, start;
Sathya Perla2632baf2013-10-01 16:00:00 +05301853 struct be_aic_obj *aic;
Sathya Perla2632baf2013-10-01 16:00:00 +05301854 struct be_rx_obj *rxo;
1855 struct be_tx_obj *txo;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001856 u64 rx_pkts = 0, tx_pkts = 0;
Sathya Perla2632baf2013-10-01 16:00:00 +05301857 ulong now;
1858 u32 pps, delta;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001859 int i;
1860
1861 aic = &adapter->aic_obj[eqo->idx];
1862 if (!aic->enable) {
1863 if (aic->jiffies)
1864 aic->jiffies = 0;
1865 eqd = aic->et_eqd;
1866 return eqd;
1867 }
1868
1869 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
1870 do {
1871 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1872 rx_pkts += rxo->stats.rx_pkts;
1873 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1874 }
1875
1876 for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
1877 do {
1878 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1879 tx_pkts += txo->stats.tx_reqs;
1880 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1881 }
1882
1883 /* Skip, if wrapped around or first calculation */
1884 now = jiffies;
1885 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1886 rx_pkts < aic->rx_pkts_prev ||
1887 tx_pkts < aic->tx_reqs_prev) {
1888 be_aic_update(aic, rx_pkts, tx_pkts, now);
1889 return aic->prev_eqd;
1890 }
1891
1892 delta = jiffies_to_msecs(now - aic->jiffies);
1893 if (delta == 0)
1894 return aic->prev_eqd;
1895
1896 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1897 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1898 eqd = (pps / 15000) << 2;
1899
1900 if (eqd < 8)
1901 eqd = 0;
1902 eqd = min_t(u32, eqd, aic->max_eqd);
1903 eqd = max_t(u32, eqd, aic->min_eqd);
1904
1905 be_aic_update(aic, rx_pkts, tx_pkts, now);
1906
1907 return eqd;
1908}
1909
1910/* For Skyhawk-R only */
1911static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1912{
1913 struct be_adapter *adapter = eqo->adapter;
1914 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1915 ulong now = jiffies;
1916 int eqd;
1917 u32 mult_enc;
1918
1919 if (!aic->enable)
1920 return 0;
1921
1922 if (time_before_eq(now, aic->jiffies) ||
1923 jiffies_to_msecs(now - aic->jiffies) < 1)
1924 eqd = aic->prev_eqd;
1925 else
1926 eqd = be_get_new_eqd(eqo);
1927
1928 if (eqd > 100)
1929 mult_enc = R2I_DLY_ENC_1;
1930 else if (eqd > 60)
1931 mult_enc = R2I_DLY_ENC_2;
1932 else if (eqd > 20)
1933 mult_enc = R2I_DLY_ENC_3;
1934 else
1935 mult_enc = R2I_DLY_ENC_0;
1936
1937 aic->prev_eqd = eqd;
1938
1939 return mult_enc;
1940}
1941
1942void be_eqd_update(struct be_adapter *adapter, bool force_update)
1943{
1944 struct be_set_eqd set_eqd[MAX_EVT_QS];
1945 struct be_aic_obj *aic;
1946 struct be_eq_obj *eqo;
1947 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001948
Sathya Perla2632baf2013-10-01 16:00:00 +05301949 for_all_evt_queues(adapter, eqo, i) {
1950 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001951 eqd = be_get_new_eqd(eqo);
1952 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05301953 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1954 set_eqd[num].eq_id = eqo->q.id;
1955 aic->prev_eqd = eqd;
1956 num++;
1957 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001958 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301959
1960 if (num)
1961 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001962}
1963
Sathya Perla3abcded2010-10-03 22:12:27 -07001964static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301965 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001966{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001967 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001968
Sathya Perlaab1594e2011-07-25 19:10:15 +00001969 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001970 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001971 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001972 stats->rx_pkts++;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +05301973 if (rxcp->tunneled)
1974 stats->rx_vxlan_offload_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001975 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001976 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001977 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001978 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001979 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001980}
1981
Sathya Perla2e588f82011-03-11 02:49:26 +00001982static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001983{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001984 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301985 * Also ignore ipcksm for ipv6 pkts
1986 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001987 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301988 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001989}
1990
/* Consume the RX page-info entry at the queue tail and make its data
 * CPU-accessible.  Only the entry flagged last_frag owns the page's DMA
 * mapping and unmaps the whole page; earlier fragments only need a
 * cache sync of their fragment.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last frag of the page: release the DMA mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still mapped; sync just this fragment for the CPU */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Advance the ring tail and drop the in-use count */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2016
2017/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002018static void be_rx_compl_discard(struct be_rx_obj *rxo,
2019 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002020{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002021 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002022 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002023
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002024 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302025 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002026 put_page(page_info->page);
2027 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002028 }
2029}
2030
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first rx_frag_size bytes are either copied wholesale into the skb's
 * linear area (tiny packets) or split header-into-linear / payload-into-frag.
 * Remaining frags are attached as page fragments, coalescing consecutive
 * frags that live in the same physical page into a single skb frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first frag stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if any) has moved into the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag[j]; drop the extra reference
			 * taken when the frag was posted.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2105
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and release the posted
		 * frags belonging to this completion.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if RXCSUM is enabled and the parsed
	 * completion says the checksum(s) passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled pkts the csum_level reflects the inner checksum */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2141
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb from the GRO pool; drop the frags of this compl */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach all received frags as page fragments, coalescing
	 * consecutive frags from the same physical page into one slot.
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frag[j]: release the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for checksum-verified pkts */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled pkts the csum_level reflects the inner checksum */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2198
/* Decode a v1 Rx completion descriptor (used when adapter->be3_native is
 * set; see be_rx_compl_get()) into the HW-version-agnostic rxcp struct.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are valid only when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002221
/* Decode a v0 (legacy) Rx completion descriptor into the HW-version-agnostic
 * rxcp struct. Unlike v1, v0 carries an ip_frag bit and no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are valid only when the vlanf bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2243
/* Fetch and parse the next valid Rx completion from rxo's CQ, or return
 * NULL if none is pending. The parsed result lives in rxo->rxcp (single
 * cached entry, overwritten on each call). The consumed CQ entry's valid
 * bit is cleared and the CQ tail is advanced before returning.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't read the rest of the compl before the valid
	 * bit has been observed set.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not reliable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* HW reports the tag in BE order on non-Lancer chips */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Ignore the pvid tag unless that vid was configured */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2288
Eric Dumazet1829b082011-03-01 05:48:12 +00002289static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002290{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002291 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002292
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002294 gfp |= __GFP_COMP;
2295 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002296}
2297
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Post until frags_needed are done or we hit an RXQ slot that is
	 * still in use (page != NULL), i.e. the ring is full.
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page, DMA-mapped once for all
			 * the frags carved out of it.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next frag of the current big page; one page ref
			 * per posted frag.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks of at most
		 * MAX_NUM_POST_ERX_DB frags per notify.
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2380
/* Fetch and decode the next valid Tx completion from txo's CQ, or return
 * NULL if none is pending. The decoded result lives in txo->txcp (single
 * cached entry). The consumed CQ entry's valid bit is cleared and the CQ
 * tail advanced before returning.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry is not re-processed */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2401
/* Unmap and free the wrbs (and their skbs) of completed TX requests,
 * walking the TXQ tail up to and including @last_index.
 * Returns the number of wrbs processed so the caller can decrement
 * txq->used accordingly.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		/* A non-NULL sent_skbs[] slot marks the hdr wrb of a new
		 * request; the current tail entry is that hdr wrb.
		 */
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first data wrb after a hdr wrb maps the skb headlen */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the last processed request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2435
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002436/* Return the number of events in the event queue */
2437static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002438{
2439 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002440 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002441
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002442 do {
2443 eqe = queue_tail_node(&eqo->q);
2444 if (eqe->evt == 0)
2445 break;
2446
2447 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002448 eqe->evt = 0;
2449 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002450 queue_tail_inc(&eqo->q);
2451 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002452
2453 return num;
2454}
2455
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002456/* Leaves the EQ is disarmed state */
2457static void be_eq_clean(struct be_eq_obj *eqo)
2458{
2459 int num = events_get(eqo);
2460
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002461 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002462}
2463
Kalesh AP99b44302015-08-05 03:27:49 -04002464/* Free posted rx buffers that were not used */
2465static void be_rxq_clean(struct be_rx_obj *rxo)
2466{
2467 struct be_queue_info *rxq = &rxo->q;
2468 struct be_rx_page_info *page_info;
2469
2470 while (atomic_read(&rxq->used) > 0) {
2471 page_info = get_rx_page_info(rxo);
2472 put_page(page_info->page);
2473 memset(page_info, 0, sizeof(*page_info));
2474 }
2475 BUG_ON(atomic_read(&rxq->used));
2476 rxq->tail = 0;
2477 rxq->head = 0;
2478}
2479
/* Drain all pending Rx completions from rxo's CQ during queue teardown and
 * leave the CQ unarmed. On BE chips, also waits for the HW flush completion.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or when the HW is known bad */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2519
/* Drain all TX completions across all TX queues during teardown, then free
 * any wrbs that were enqueued but never notified to HW and reset the TXQ
 * indices accordingly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			/* Reap every completion currently in the CQ */
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the silence
				 * timeout.
				 */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2585
/* Tear down all event queues: drain and destroy each created EQ along with
 * its NAPI context and affinity mask, then free the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume pending events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2602
/* Allocate and create the event queues, one per interrupt vector (capped by
 * the configured queue count), and set up each EQ's adaptive-interrupt
 * state, CPU affinity hint and NAPI context.
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to clean up via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Enable adaptive interrupt coalescing by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		/* Spread EQs across CPUs local to the device's NUMA node */
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2641
Sathya Perla5fb379e2009-06-18 00:02:59 +00002642static void be_mcc_queues_destroy(struct be_adapter *adapter)
2643{
2644 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002645
Sathya Perla8788fdc2009-07-27 22:52:03 +00002646 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002647 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002648 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002649 be_queue_free(adapter, q);
2650
Sathya Perla8788fdc2009-07-27 22:52:03 +00002651 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002652 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002653 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002654 be_queue_free(adapter, q);
2655}
2656
2657/* Must be called only after TX qs are created as MCC shares TX EQ */
2658static int be_mcc_queues_create(struct be_adapter *adapter)
2659{
2660 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002661
Sathya Perla8788fdc2009-07-27 22:52:03 +00002662 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002663 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302664 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002665 goto err;
2666
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002667 /* Use the default EQ for MCC completions */
2668 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002669 goto mcc_cq_free;
2670
Sathya Perla8788fdc2009-07-27 22:52:03 +00002671 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002672 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2673 goto mcc_cq_destroy;
2674
Sathya Perla8788fdc2009-07-27 22:52:03 +00002675 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002676 goto mcc_q_free;
2677
2678 return 0;
2679
2680mcc_q_free:
2681 be_queue_free(adapter, q);
2682mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002683 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002684mcc_cq_free:
2685 be_queue_free(adapter, cq);
2686err:
2687 return -1;
2688}
2689
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002690static void be_tx_queues_destroy(struct be_adapter *adapter)
2691{
2692 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002693 struct be_tx_obj *txo;
2694 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002695
Sathya Perla3c8def92011-06-12 20:01:58 +00002696 for_all_tx_queues(adapter, txo, i) {
2697 q = &txo->q;
2698 if (q->created)
2699 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2700 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002701
Sathya Perla3c8def92011-06-12 20:01:58 +00002702 q = &txo->cq;
2703 if (q->created)
2704 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2705 be_queue_free(adapter, q);
2706 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002707}
2708
/* Create all TX queues and their completion queues.
 * The TX queue count is limited by the number of EQs and the HW maximum.
 * Each TX CQ is bound to an EQ (round-robin when there are fewer EQs
 * than TXQs), and the netdev XPS map is programmed from the EQ's CPU
 * affinity mask. Returns 0 on success or a negative status; the caller
 * unwinds partially created queues on failure.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		/* Initialize per-queue 64-bit stats seqcounts */
		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from the EQ's CPUs to this queue */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2753
2754static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002755{
2756 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002757 struct be_rx_obj *rxo;
2758 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002759
Sathya Perla3abcded2010-10-03 22:12:27 -07002760 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002761 q = &rxo->cq;
2762 if (q->created)
2763 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2764 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002765 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002766}
2767
/* Decide the RX queue layout and create the RX completion queues.
 * RSS rings track the EQ count; RSS is used only when at least two
 * rings are possible, otherwise a single RXQ is used. Each RX CQ is
 * bound round-robin to an EQ. Returns 0 on success or a negative rc;
 * partial creations are unwound by the caller.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		/* Initialize per-queue 64-bit stats seqcount */
		u64_stats_init(&rxo->stats.sync);
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2809
/* INTx interrupt handler: schedules NAPI on EQ0 (the only EQ used in
 * INTx mode) and acknowledges the events consumed so far.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2841
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002842static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002843{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002844 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002845
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002846 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00002847 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002848 return IRQ_HANDLED;
2849}
2850
Sathya Perla2e588f82011-03-11 02:49:26 +00002851static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002852{
Somnath Koture38b1702013-05-29 22:55:56 +00002853 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002854}
2855
/* Process up to @budget RX completions on @rxo.
 * Flush completions and error/filtered frames are discarded; valid
 * frames are delivered via GRO (unless busy-polling) or the regular
 * RX path. Consumed completions are acknowledged on the CQ and the
 * RX ring is replenished when it runs low.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2915
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302916static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302917{
2918 switch (status) {
2919 case BE_TX_COMP_HDR_PARSE_ERR:
2920 tx_stats(txo)->tx_hdr_parse_err++;
2921 break;
2922 case BE_TX_COMP_NDMA_ERR:
2923 tx_stats(txo)->tx_dma_err++;
2924 break;
2925 case BE_TX_COMP_ACL_ERR:
2926 tx_stats(txo)->tx_spoof_check_err++;
2927 break;
2928 }
2929}
2930
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302931static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302932{
2933 switch (status) {
2934 case LANCER_TX_COMP_LSO_ERR:
2935 tx_stats(txo)->tx_tso_err++;
2936 break;
2937 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2938 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2939 tx_stats(txo)->tx_spoof_check_err++;
2940 break;
2941 case LANCER_TX_COMP_QINQ_ERR:
2942 tx_stats(txo)->tx_qinq_err++;
2943 break;
2944 case LANCER_TX_COMP_PARITY_ERR:
2945 tx_stats(txo)->tx_internal_parity_err++;
2946 break;
2947 case LANCER_TX_COMP_DMA_ERR:
2948 tx_stats(txo)->tx_dma_err++;
2949 break;
2950 }
2951}
2952
/* Reap TX completions on @txo (the TX queue at netdev index @idx).
 * Frees the WRBs of each completed request, records completion errors
 * in stats, acknowledges the CQ, and wakes the netdev subqueue if it
 * was stopped for lack of WRBs and enough have now been freed.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* Error encodings differ between Lancer and BE2/3 */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002987
#ifdef CONFIG_NET_RX_BUSY_POLL
/* NAPI/busy-poll arbitration helpers.
 * Each EQ carries a small state machine (eqo->state) protected by
 * eqo->lock so that the NAPI poller and the busy-poll path never
 * process the same queues concurrently.
 */

/* Try to take the EQ for NAPI processing.
 * Returns false (and records a yield) if busy-poll currently owns it.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing. */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take the EQ for busy-polling.
 * Returns false (and records a yield) if NAPI currently owns it.
 */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-polling. */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the EQ's busy-poll lock and state. */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until busy-poll is guaranteed not to run on this EQ. */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Busy-poll disabled: locking degenerates to "NAPI always wins". */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3087
/* NAPI poll handler for an EQ: reaps TX completions on all TX queues
 * bound to this EQ, processes RX completions (if the busy-poll lock is
 * available), services MCC completions on the MCC EQ, and either
 * completes NAPI and re-arms the EQ or stays in polling mode.
 * Returns the amount of RX work done (budget if polling continues).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the queues; claim full budget to re-poll */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		/* Re-arm the EQ so the next event raises an interrupt */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3136
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll (low-latency sockets) handler for an EQ.
 * Processes a small batch (up to 4) of RX completions on the EQ's RX
 * queues if the busy-poll lock can be taken. Returns the work done,
 * or LL_FLUSH_BUSY when NAPI currently owns the queues.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3158
/* Check the adapter's error registers and flag unrecoverable errors.
 * On Lancer chips the SLIPORT status/error registers are read; a FW
 * reset signature is logged as an update in progress rather than an
 * error. On BE/Skyhawk the UE (unrecoverable error) status registers
 * are read and masked; set bits are logged per-bit. Bails out early if
 * an HW error has already been recorded.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Masked bits are ignored */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3227
Sathya Perla8d56ff12009-11-22 22:02:26 +00003228static void be_msix_disable(struct be_adapter *adapter)
3229{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003230 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003231 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003232 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303233 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003234 }
3235}
3236
/* Enable MSI-x for the adapter.
 * Requests between MIN_MSIX_VECTORS and the computed maximum; when RoCE
 * is supported, half of the granted vectors are set aside for RoCE.
 * Returns 0 on success. On failure, returns the error only for VFs
 * (which cannot fall back to INTx); PFs return 0 and fall back.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3280
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003281static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303282 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003283{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303284 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003285}
3286
3287static int be_msix_register(struct be_adapter *adapter)
3288{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003289 struct net_device *netdev = adapter->netdev;
3290 struct be_eq_obj *eqo;
3291 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003292
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003293 for_all_evt_queues(adapter, eqo, i) {
3294 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3295 vec = be_msix_vec_get(adapter, eqo);
3296 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003297 if (status)
3298 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003299
3300 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003301 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003302
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003303 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003304err_msix:
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303305 for (i--; i >= 0; i--) {
3306 eqo = &adapter->eq_obj[i];
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003307 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Venkat Duvvuru6e3cd5f2015-12-18 01:40:50 +05303308 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003309 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303310 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003311 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003312 return status;
3313}
3314
/* Register the adapter's interrupt handler(s).
 * Prefers MSI-x; falls back to a shared INTx line on EQ0 for PFs when
 * MSI-x registration fails (VFs cannot use INTx and return the error).
 * Sets isr_registered on success. Returns 0 or a negative errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3342
3343static void be_irq_unregister(struct be_adapter *adapter)
3344{
3345 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003346 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003347 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003348
3349 if (!adapter->isr_registered)
3350 return;
3351
3352 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003353 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003354 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003355 goto done;
3356 }
3357
3358 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003359 for_all_evt_queues(adapter, eqo, i) {
3360 vec = be_msix_vec_get(adapter, eqo);
3361 irq_set_affinity_hint(vec, NULL);
3362 free_irq(vec, eqo);
3363 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003364
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003365done:
3366 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003367}
3368
/* Destroy every RX queue created by be_rx_qs_create() and free its ring
 * memory.  The Lancer-specific pre-destroy posting below is a hardware
 * workaround; keep the exact ordering of the clean/post/destroy steps.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			/* Drain completions generated up to the destroy, then
			 * release any buffers still held by the ring.
			 */
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		/* Ring memory is freed even if the queue was never created */
		be_queue_free(adapter, q);
	}
}
3398
/* Quiesce the interface's RX filters on the close path: delete the primary
 * MAC (pmac_id[0]), flush the unicast list and - on Lancer only - clear the
 * RX-filter IFACE flags (see the FW-bug note below for why BE3/Skyhawk-R
 * keep their flags).
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW.  As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3425
/* ndo_stop handler: tear down the data path in roughly the reverse order
 * of be_open() - disable RX filters, stop NAPI/busy-poll, drain pending
 * TX completions, destroy RX queues and finally release the IRQs.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		/* Make sure no handler is still running on this EQ's vector
		 * before cleaning the EQ.
		 */
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3470
/* Allocate and create all RX queues, program the RSS indirection table and
 * hash key, and seed every ring with buffers.
 *
 * The default (non-RSS) RXQ is created only when needed; the remaining
 * queues are RSS-capable.  Returns 0 on success or a negative/FW error
 * from the first failing step (partially created queues are cleaned up by
 * the caller via be_close()/be_rx_qs_destroy()).
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queue ids round-robin.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is available only on chips newer than BE2/BE3 */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       RSS_INDIR_TABLE_LEN, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Cache the key actually programmed, for later ethtool queries */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3540
/* Program the interface's RX filters on the open path: enable the basic
 * filter flags, add the primary MAC (except on BE3 VFs, where the PF has
 * already programmed it), re-apply any VLANs added earlier and sync the
 * current RX (multicast/promiscuous) mode.
 *
 * Returns 0 on success or the first failing FW-cmd status.
 */
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
	if (status)
		return status;

	/* For BE3 VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
		status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
					 adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status)
			return status;
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	return 0;
}
3565
/* ndo_open handler: create the RX queues, enable RX filters, register
 * IRQs, arm all completion/event queues, start NAPI and the TX queues.
 * On any failure the partially initialized state is unwound via
 * be_close() and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm the RX and TX completion queues so they raise events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: report the current link state to the stack */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	/* NOTE(review): the specific errno of the failing step is masked
	 * here; callers only see -EIO.
	 */
	return -EIO;
}
3617
/* Configure Wake-on-LAN (magic packet) in FW and the PCI PM registers.
 * @enable: true to arm WoL, false to disarm it.
 *
 * Returns 0 on success, -ENOMEM if the DMA-able cmd buffer cannot be
 * allocated, or the failing config-write/FW-cmd status.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem cmd;
	u8 mac[ETH_ALEN];
	int status;

	eth_zero_addr(mac);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(dev, "Could not enable Wake-on-lan\n");
			goto err;
		}
	} else {
		/* NOTE(review): a zeroed MAC is passed to the FW cmd when
		 * enabling WoL and the real netdev MAC when disabling --
		 * presumably what be_cmd_enable_magic_wol() expects; confirm
		 * against the FW spec.
		 */
		ether_addr_copy(mac, adapter->netdev->dev_addr);
	}

	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
err:
	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3651
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003652static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3653{
3654 u32 addr;
3655
3656 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3657
3658 mac[5] = (u8)(addr & 0xFF);
3659 mac[4] = (u8)((addr >> 8) & 0xFF);
3660 mac[3] = (u8)((addr >> 16) & 0xFF);
3661 /* Use the OUI from the current MAC address */
3662 memcpy(mac, adapter->netdev->dev_addr, 3);
3663}
3664
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last FW command issued (failures are logged
 * per-VF but do not stop the loop).
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* On BEx the PF adds the address as a PMAC on the VF's
		 * interface; other chips use the SET_MAC command.
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* NOTE(review): no carry into mac[4]; the last byte wraps
		 * after 256 VFs -- harmless for supported VF counts.
		 */
		mac[5] += 1;
	}
	return status;
}
3700
Sathya Perla4c876612013-02-03 20:30:11 +00003701static int be_vfs_mac_query(struct be_adapter *adapter)
3702{
3703 int status, vf;
3704 u8 mac[ETH_ALEN];
3705 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003706
3707 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303708 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3709 mac, vf_cfg->if_handle,
3710 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003711 if (status)
3712 return status;
3713 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3714 }
3715 return 0;
3716}
3717
/* Undo be_vf_setup(): disable SR-IOV, remove the per-VF MAC and destroy
 * each VF's interface.  When VFs are currently assigned to VMs, SR-IOV and
 * the VF interfaces are deliberately left intact (only the local state is
 * freed) to avoid yanking resources out from under the guests.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx: delete the PMAC entry; other chips: clear via SET_MAC */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3746
/* Destroy all queues created during setup; MCC, RX-CQ and TX queues are
 * torn down before the event queues they are attached to.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3754
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303755static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003756{
Sathya Perla191eb752012-02-23 18:50:13 +00003757 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3758 cancel_delayed_work_sync(&adapter->work);
3759 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3760 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303761}
3762
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003763static void be_cancel_err_detection(struct be_adapter *adapter)
3764{
3765 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3766 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3767 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3768 }
3769}
3770
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303771#ifdef CONFIG_BE2NET_VXLAN
/* Tear down VxLAN offload state: convert the tunnel interface back to a
 * normal one in FW, clear the programmed VxLAN port, and stop advertising
 * UDP-tunnel offload features to the stack.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	/* Withdraw the tunnel offload features from the netdev */
	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303790#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303791
/* Compute how many RSS queue-pairs to provision for each VF, given the
 * pool resources and the requested @num_vfs.  Returns 1 (single queue, no
 * RSS) when no VFs are requested, in multi-channel mode, or when the VF
 * count exceeds the per-port RSS-interface limit.
 */
static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
{
	struct be_resources res = adapter->pool_res;
	u16 num_vf_qs = 1;

	/* Distribute the queue resources equally among the PF and it's VFs
	 * Do not distribute queue resources in multi-channel configuration.
	 */
	if (num_vfs && !be_is_mc(adapter)) {
		/* If number of VFs requested is 8 less than max supported,
		 * assign 8 queue pairs to the PF and divide the remaining
		 * resources evenly among the VFs
		 */
		if (num_vfs < (be_max_vfs(adapter) - 8))
			num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
		else
			num_vf_qs = res.max_rss_qs / num_vfs;

		/* NOTE(review): if res.max_rss_qs <= num_vfs (or < 8 in the
		 * first branch) the quotient is 0 or underflows -- presumably
		 * the resource pool always exceeds these bounds; confirm.
		 */

		/* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
		 * interfaces per port. Provide RSS on VFs, only if number
		 * of VFs requested is less than MAX_RSS_IFACES limit.
		 */
		if (num_vfs >= MAX_RSS_IFACES)
			num_vf_qs = 1;
	}
	return num_vf_qs;
}
3819
/* Unwind everything done in the setup path: stop the worker, clear VFs,
 * rebalance FW SR-IOV resources, drop VxLAN offloads, destroy the IFACE
 * and all queues, and disable MSI-x.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3856
/* Create an IFACE in FW for every VF (issued by the PF on the VF's
 * behalf).  Capability flags default to BE_VF_IF_EN_FLAGS and, on non-BE3
 * chips, are refined from the FW resource profile; VLAN-promiscuous
 * capability is always stripped so VFs cannot enable it.
 *
 * Returns 0 on success or the first failing if_create status.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_VF_IF_EN_FLAGS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* PF should enable IF flags during proxy if_create call */
		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3891
Sathya Perla39f1d942012-05-08 19:41:24 +00003892static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003893{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003894 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003895 int vf;
3896
Sathya Perla39f1d942012-05-08 19:41:24 +00003897 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3898 GFP_KERNEL);
3899 if (!adapter->vf_cfg)
3900 return -ENOMEM;
3901
Sathya Perla11ac75e2011-12-13 00:58:50 +00003902 for_all_vfs(adapter, vf_cfg, vf) {
3903 vf_cfg->if_handle = -1;
3904 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003905 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003906 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003907}
3908
/* Bring up SR-IOV: create (or, on a re-load, rediscover) each VF's IFACE
 * and MAC, grant filter-management privilege, configure QoS/spoof-check/
 * link state, and finally enable SR-IOV in the PCI layer.  On any failure
 * the partial state is undone via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist (previous driver load): reuse the
		 * existing interfaces and query the MACs programmed in FW.
		 */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		/* Cache the spoof-check setting currently in FW */
		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	/* Enable SR-IOV in the PCI layer only on a fresh bring-up */
	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3992
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303993/* Converting function_mode bits on BE3 to SH mc_type enums */
3994
3995static u8 be_convert_mc_type(u32 function_mode)
3996{
Suresh Reddy66064db2014-06-23 16:41:29 +05303997 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303998 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303999 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304000 return FLEX10;
4001 else if (function_mode & VNIC_MODE)
4002 return vNIC2;
4003 else if (function_mode & UMC_ENABLED)
4004 return UMC;
4005 else
4006 return MC_NONE;
4007}
4008
Sathya Perla92bf14a2013-08-27 16:57:32 +05304009/* On BE2/BE3 FW does not suggest the supported limits */
4010static void BEx_get_resources(struct be_adapter *adapter,
4011 struct be_resources *res)
4012{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304013 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304014
4015 if (be_physfn(adapter))
4016 res->max_uc_mac = BE_UC_PMAC_COUNT;
4017 else
4018 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4019
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304020 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4021
4022 if (be_is_mc(adapter)) {
4023 /* Assuming that there are 4 channels per port,
4024 * when multi-channel is enabled
4025 */
4026 if (be_is_qnq_mode(adapter))
4027 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4028 else
4029 /* In a non-qnq multichannel mode, the pvid
4030 * takes up one vlan entry
4031 */
4032 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4033 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304034 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304035 }
4036
Sathya Perla92bf14a2013-08-27 16:57:32 +05304037 res->max_mcast_mac = BE_MAX_MC;
4038
Vasundhara Volama5243da2014-03-11 18:53:07 +05304039 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4040 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4041 * *only* if it is RSS-capable.
4042 */
4043 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004044 be_virtfn(adapter) ||
4045 (be_is_mc(adapter) &&
4046 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304047 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304048 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4049 struct be_resources super_nic_res = {0};
4050
4051 /* On a SuperNIC profile, the driver needs to use the
4052 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4053 */
Vasundhara Volamf2858732015-03-04 00:44:33 -05004054 be_cmd_get_profile_config(adapter, &super_nic_res,
4055 RESOURCE_LIMITS, 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304056 /* Some old versions of BE3 FW don't report max_tx_qs value */
4057 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4058 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304059 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304060 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304061
4062 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4063 !use_sriov && be_physfn(adapter))
4064 res->max_rss_qs = (adapter->be3_native) ?
4065 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4066 res->max_rx_qs = res->max_rss_qs + 1;
4067
Suresh Reddye3dc8672014-01-06 13:02:25 +05304068 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304069 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304070 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4071 else
4072 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304073
4074 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004075 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304076 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4077 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4078}
4079
Sathya Perla30128032011-11-10 19:17:57 +00004080static void be_setup_init(struct be_adapter *adapter)
4081{
4082 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004083 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004084 adapter->if_handle = -1;
4085 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004086 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004087 if (be_physfn(adapter))
4088 adapter->cmd_privileges = MAX_PRIVILEGES;
4089 else
4090 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004091}
4092
Vasundhara Volambec84e62014-06-30 13:01:32 +05304093static int be_get_sriov_config(struct be_adapter *adapter)
4094{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304095 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304096 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304097
Vasundhara Volamf2858732015-03-04 00:44:33 -05004098 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304099
Vasundhara Volamace40af2015-03-04 00:44:34 -05004100 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304101 if (BE3_chip(adapter) && !res.max_vfs) {
4102 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4103 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4104 }
4105
Sathya Perlad3d18312014-08-01 17:47:30 +05304106 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304107
Vasundhara Volamace40af2015-03-04 00:44:34 -05004108 /* If during previous unload of the driver, the VFs were not disabled,
4109 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4110 * Instead use the TotalVFs value stored in the pci-dev struct.
4111 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304112 old_vfs = pci_num_vf(adapter->pdev);
4113 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004114 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4115 old_vfs);
4116
4117 adapter->pool_res.max_vfs =
4118 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304119 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304120 }
4121
4122 return 0;
4123}
4124
Vasundhara Volamace40af2015-03-04 00:44:34 -05004125static void be_alloc_sriov_res(struct be_adapter *adapter)
4126{
4127 int old_vfs = pci_num_vf(adapter->pdev);
4128 u16 num_vf_qs;
4129 int status;
4130
4131 be_get_sriov_config(adapter);
4132
4133 if (!old_vfs)
4134 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4135
4136 /* When the HW is in SRIOV capable configuration, the PF-pool
4137 * resources are given to PF during driver load, if there are no
4138 * old VFs. This facility is not available in BE3 FW.
4139 * Also, this is done by FW in Lancer chip.
4140 */
4141 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4142 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4143 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4144 num_vf_qs);
4145 if (status)
4146 dev_err(&adapter->pdev->dev,
4147 "Failed to optimize SRIOV resources\n");
4148 }
4149}
4150
/* Discover the per-function queue/MAC/VLAN limits: computed locally for
 * BE2/BE3, queried from FW for Lancer/Skyhawk. Caches the result in
 * adapter->res and derives cfg_num_qs from it.
 * Returns 0 on success or a negative FW-command error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4201
/* Query static configuration from the adapter FW (controller attributes,
 * fw-cfg, log level, WoL capability, port name, active profile), then
 * resolve resource limits and allocate the per-UC-MAC pmac_id table.
 * Returns 0 on success or a negative error.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* FAT dump length is only queried on non-Lancer PFs */
	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	/* Best-effort queries: return values deliberately ignored */
	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4246
Sathya Perla95046b92013-07-23 15:25:02 +05304247static int be_mac_setup(struct be_adapter *adapter)
4248{
4249 u8 mac[ETH_ALEN];
4250 int status;
4251
4252 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4253 status = be_cmd_get_perm_mac(adapter, mac);
4254 if (status)
4255 return status;
4256
4257 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4258 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304259 }
4260
Sathya Perla95046b92013-07-23 15:25:02 +05304261 return 0;
4262}
4263
/* Kick off the periodic (1s) housekeeping worker and mark it scheduled. */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4269
/* Arm the (1s-delayed) error-detection work item and mark it scheduled. */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4276
Sathya Perla77071332013-08-27 16:57:34 +05304277static int be_setup_queues(struct be_adapter *adapter)
4278{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304279 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304280 int status;
4281
4282 status = be_evt_queues_create(adapter);
4283 if (status)
4284 goto err;
4285
4286 status = be_tx_qs_create(adapter);
4287 if (status)
4288 goto err;
4289
4290 status = be_rx_cqs_create(adapter);
4291 if (status)
4292 goto err;
4293
4294 status = be_mcc_queues_create(adapter);
4295 if (status)
4296 goto err;
4297
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304298 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4299 if (status)
4300 goto err;
4301
4302 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4303 if (status)
4304 goto err;
4305
Sathya Perla77071332013-08-27 16:57:34 +05304306 return 0;
4307err:
4308 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4309 return status;
4310}
4311
/* Tear down and re-create all queues (e.g. after a queue-count change):
 * close the netdev if it was up, stop the worker, re-program MSI-X when
 * possible, rebuild the queues, restart the worker and re-open.
 * Returns 0 on success or a negative error.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4347
/* Parse the leading "<major>." component of a FW version string.
 * Returns the major number, or 0 if the string does not start with one.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4358
Sathya Perlaf962f842015-02-23 04:20:16 -05004359/* If any VFs are already enabled don't FLR the PF */
4360static bool be_reset_required(struct be_adapter *adapter)
4361{
4362 return pci_num_vf(adapter->pdev) ? false : true;
4363}
4364
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip the function-level reset when VFs are still active */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4396
/* Full function bring-up: FW init/FLR, SR-IOV resource carving, MSI-X,
 * interface and queue creation, MAC programming, flow control and VF setup.
 * Sequencing is significant — each step depends on state established by
 * the previous ones. On any failure after be_get_config(), everything set
 * up so far is torn down via be_clear().
 * Returns 0 on success or a negative error.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Old BE2 firmware (< 4.0) has known interrupt issues; warn only */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If setting flow control fails, re-read what the FW is using */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4489
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: ring the doorbell of every event queue and schedule its
 * NAPI context, so RX/TX completions are serviced without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eq;
	int idx;

	for_all_evt_queues(adapter, eq, idx) {
		be_eq_notify(eq->adapter, eq->q.id, false, true, 0, 0);
		napi_schedule(&eq->napi);
	}
}
#endif
4503
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004504int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4505{
4506 const struct firmware *fw;
4507 int status;
4508
4509 if (!netif_running(adapter->netdev)) {
4510 dev_err(&adapter->pdev->dev,
4511 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304512 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004513 }
4514
4515 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4516 if (status)
4517 goto fw_exit;
4518
4519 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4520
4521 if (lancer_chip(adapter))
4522 status = lancer_fw_download(adapter, fw);
4523 else
4524 status = be_fw_download(adapter, fw);
4525
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004526 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304527 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004528
Ajit Khaparde84517482009-09-04 03:12:16 +00004529fw_exit:
4530 release_firmware(fw);
4531 return status;
4532}
4533
/* ndo_bridge_setlink: program the embedded switch forwarding mode (VEB or
 * VEPA) from an IFLA_AF_SPEC/IFLA_BRIDGE_MODE netlink attribute.
 * Only meaningful with SR-IOV enabled; BE3 supports VEB only.
 * Returns 0 on success or a negative errno.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	/* Only the first IFLA_BRIDGE_MODE attribute is acted upon; the
	 * function returns from inside the loop once it is processed.
	 */
	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4583
4584static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02004585 struct net_device *dev, u32 filter_mask,
4586 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004587{
4588 struct be_adapter *adapter = netdev_priv(dev);
4589 int status = 0;
4590 u8 hsw_mode;
4591
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004592 /* BE and Lancer chips support VEB mode only */
4593 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4594 hsw_mode = PORT_FWD_TYPE_VEB;
4595 } else {
4596 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04004597 adapter->if_handle, &hsw_mode,
4598 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004599 if (status)
4600 return 0;
Kalesh Purayilff9ed192015-07-10 05:32:44 -04004601
4602 if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
4603 return 0;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004604 }
4605
4606 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4607 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004608 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07004609 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004610}
4611
#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Offloads supported only on Skyhawk, single-channel functions */
	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* Same port added again: just count the alias, no HW change */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		return;
	}

	/* A different port while offloads are active: disable offloads
	 * entirely (see notes above) but keep counting ports.
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload capabilities to the stack (see notes) */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4680
/* Undo be_add_vxlan_port(): drop an alias reference if one exists,
 * otherwise disable HW VxLAN offloads for the port. The port counter is
 * decremented for every non-alias delete, including deletes of ports that
 * never had offloads enabled.
 */
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
Joe Stringer725d5482014-11-13 16:38:13 -08004705
Jesse Gross5f352272014-12-23 22:37:26 -08004706static netdev_features_t be_features_check(struct sk_buff *skb,
4707 struct net_device *dev,
4708 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004709{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304710 struct be_adapter *adapter = netdev_priv(dev);
4711 u8 l4_hdr = 0;
4712
4713 /* The code below restricts offload features for some tunneled packets.
4714 * Offload features for normal (non tunnel) packets are unchanged.
4715 */
4716 if (!skb->encapsulation ||
4717 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
4718 return features;
4719
4720 /* It's an encapsulated packet and VxLAN offloads are enabled. We
4721 * should disable tunnel offload features if it's not a VxLAN packet,
4722 * as tunnel offloads have been enabled only for VxLAN. This is done to
4723 * allow other tunneled traffic like GRE work fine while VxLAN
4724 * offloads are configured in Skyhawk-R.
4725 */
4726 switch (vlan_get_protocol(skb)) {
4727 case htons(ETH_P_IP):
4728 l4_hdr = ip_hdr(skb)->protocol;
4729 break;
4730 case htons(ETH_P_IPV6):
4731 l4_hdr = ipv6_hdr(skb)->nexthdr;
4732 break;
4733 default:
4734 return features;
4735 }
4736
4737 if (l4_hdr != IPPROTO_UDP ||
4738 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
4739 skb->inner_protocol != htons(ETH_P_TEB) ||
4740 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
4741 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
Tom Herberta1882222015-12-14 11:19:43 -08004742 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304743
4744 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08004745}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304746#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304747
Sriharsha Basavapatnaa155a5d2015-07-22 11:15:12 +05304748static int be_get_phys_port_id(struct net_device *dev,
4749 struct netdev_phys_item_id *ppid)
4750{
4751 int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
4752 struct be_adapter *adapter = netdev_priv(dev);
4753 u8 *id;
4754
4755 if (MAX_PHYS_ITEM_ID_LEN < id_len)
4756 return -ENOSPC;
4757
4758 ppid->id[0] = adapter->hba_port_num + 1;
4759 id = &ppid->id[1];
4760 for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
4761 i--, id += CNTL_SERIAL_NUM_WORD_SZ)
4762 memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
4763
4764 ppid->id_len = id_len;
4765
4766 return 0;
4767}
4768
/* net_device_ops for be2net interfaces. Optional hooks (netpoll, busy-poll,
 * VxLAN offload notifications) are compiled in only when the corresponding
 * kernel config options are enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
4801
/* One-time netdev initialization called from be_probe() before
 * register_netdev(): advertises offload features, sets flags and
 * installs the netdev/ethtool op tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* user-togglable features: SG, TSO, checksum offloads, VLAN tx */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RSS hash reporting only makes sense with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* enable everything in hw_features by default; VLAN rx strip and
	 * filter are always-on (not in hw_features, so not togglable)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW supports unicast MAC filtering */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4828
Kalesh AP87ac1a52015-02-23 04:20:15 -05004829static void be_cleanup(struct be_adapter *adapter)
4830{
4831 struct net_device *netdev = adapter->netdev;
4832
4833 rtnl_lock();
4834 netif_device_detach(netdev);
4835 if (netif_running(netdev))
4836 be_close(netdev);
4837 rtnl_unlock();
4838
4839 be_clear(adapter);
4840}
4841
Kalesh AP484d76f2015-02-23 04:20:14 -05004842static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004843{
Kalesh APd0e1b312015-02-23 04:20:12 -05004844 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004845 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004846
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004847 status = be_setup(adapter);
4848 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05004849 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004850
Kalesh APd0e1b312015-02-23 04:20:12 -05004851 if (netif_running(netdev)) {
4852 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004853 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05004854 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004855 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004856
Kalesh APd0e1b312015-02-23 04:20:12 -05004857 netif_device_attach(netdev);
4858
Kalesh AP484d76f2015-02-23 04:20:14 -05004859 return 0;
4860}
4861
/* Attempt to recover the adapter after a detected HW error by
 * re-running the resume path. On failure, the log message differs for
 * PF vs VF: a VF caller (be_err_detection_task) keeps retrying, a PF
 * does not. Returns 0 on success or the be_resume() error code.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}
4881
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004882static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004883{
4884 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004885 container_of(work, struct be_adapter,
4886 be_err_detection_work.work);
Somnath Kotur01e5b2c2013-05-29 22:56:17 +00004887 int status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004888
4889 be_detect_error(adapter);
4890
Venkata Duvvuru954f6822015-05-13 13:00:13 +05304891 if (be_check_error(adapter, BE_ERROR_HW)) {
Kalesh AP87ac1a52015-02-23 04:20:15 -05004892 be_cleanup(adapter);
Kalesh APd0e1b312015-02-23 04:20:12 -05004893
4894 /* As of now error recovery support is in Lancer only */
4895 if (lancer_chip(adapter))
4896 status = be_err_recover(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004897 }
4898
Sathya Perla9fa465c2015-02-23 04:20:13 -05004899 /* Always attempt recovery on VFs */
4900 if (!status || be_virtfn(adapter))
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004901 be_schedule_err_detection(adapter);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004902}
4903
Vasundhara Volam21252372015-02-06 08:18:42 -05004904static void be_log_sfp_info(struct be_adapter *adapter)
4905{
4906 int status;
4907
4908 status = be_cmd_query_sfp_info(adapter);
4909 if (!status) {
4910 dev_err(&adapter->pdev->dev,
4911 "Unqualified SFP+ detected on %c from %s part no: %s",
4912 adapter->port_name, adapter->phy.vendor_name,
4913 adapter->phy.vendor_pn);
4914 }
4915 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
4916}
4917
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004918static void be_worker(struct work_struct *work)
4919{
4920 struct be_adapter *adapter =
4921 container_of(work, struct be_adapter, work.work);
4922 struct be_rx_obj *rxo;
4923 int i;
4924
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004925 /* when interrupts are not yet enabled, just reap any pending
Sathya Perla78fad34e2015-02-23 04:20:08 -05004926 * mcc completions
4927 */
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004928 if (!netif_running(adapter->netdev)) {
Amerigo Wang072a9c42012-08-24 21:41:11 +00004929 local_bh_disable();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004930 be_process_mcc(adapter);
Amerigo Wang072a9c42012-08-24 21:41:11 +00004931 local_bh_enable();
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004932 goto reschedule;
4933 }
4934
4935 if (!adapter->stats_cmd_sent) {
4936 if (lancer_chip(adapter))
4937 lancer_cmd_get_pport_stats(adapter,
Kalesh APcd3307aa2014-09-19 15:47:02 +05304938 &adapter->stats_cmd);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004939 else
4940 be_cmd_get_stats(adapter, &adapter->stats_cmd);
4941 }
4942
Vasundhara Volamd696b5e2013-08-06 09:27:16 +05304943 if (be_physfn(adapter) &&
4944 MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004945 be_cmd_get_die_temperature(adapter);
4946
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004947 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla6384a4d2013-10-25 10:40:16 +05304948 /* Replenish RX-queues starved due to memory
4949 * allocation failures.
4950 */
4951 if (rxo->rx_post_starved)
Ajit Khapardec30d7262014-09-12 17:39:16 +05304952 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004953 }
4954
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04004955 /* EQ-delay update for Skyhawk is done while notifying EQ */
4956 if (!skyhawk_chip(adapter))
4957 be_eqd_update(adapter, false);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00004958
Vasundhara Volam21252372015-02-06 08:18:42 -05004959 if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
4960 be_log_sfp_info(adapter);
4961
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004962reschedule:
4963 adapter->work_counter++;
4964 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4965}
4966
Sathya Perla78fad34e2015-02-23 04:20:08 -05004967static void be_unmap_pci_bars(struct be_adapter *adapter)
4968{
4969 if (adapter->csr)
4970 pci_iounmap(adapter->pdev, adapter->csr);
4971 if (adapter->db)
4972 pci_iounmap(adapter->pdev, adapter->db);
4973}
4974
/* Return the PCI BAR number that holds the doorbell region:
 * BAR 0 on Lancer chips and on VFs, BAR 4 everywhere else.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
4982
4983static int be_roce_map_pci_bars(struct be_adapter *adapter)
4984{
4985 if (skyhawk_chip(adapter)) {
4986 adapter->roce_db.size = 4096;
4987 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4988 db_bar(adapter));
4989 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4990 db_bar(adapter));
4991 }
4992 return 0;
4993}
4994
/* Map the PCI BARs needed by the driver: CSR (BEx PF only), doorbell
 * (BAR chosen by db_bar()) and PCICFG (BEx/Skyhawk). Also latches the
 * SLI family and PF/VF mode from config space. On any iomap failure,
 * already-created mappings are released. Returns 0 or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* decode chip family and PF/VF mode from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR space (BAR 2) exists only on BE2/BE3 PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG through an offset in the
			 * doorbell BAR rather than a separate mapping
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5037
5038static void be_drv_cleanup(struct be_adapter *adapter)
5039{
5040 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5041 struct device *dev = &adapter->pdev->dev;
5042
5043 if (mem->va)
5044 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5045
5046 mem = &adapter->rx_filter;
5047 if (mem->va)
5048 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5049
5050 mem = &adapter->stats_cmd;
5051 if (mem->va)
5052 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5053}
5054
/* Allocate and initialize various fields in be_adapter struct:
 * coherent DMA buffers for the mailbox (16-byte aligned view carved out
 * of an over-allocated buffer), RX-filter and stats commands, plus
 * locks, completions and the two delayed works. Frees everything
 * already allocated on failure. Returns 0 or -ENOMEM.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* over-allocate by 16 so the mailbox can be aligned below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is a 16-byte-aligned window into mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* stats request size depends on chip generation / FW interface */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5125
/* PCI remove callback: tear down in reverse order of be_probe() —
 * RoCE first, then interrupts, error-detection work, netdev
 * unregistration, HW resources, FW handshake, BAR mappings, DMA
 * buffers and finally the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5155
Arnd Bergmann9a032592015-05-18 23:06:45 +02005156static ssize_t be_hwmon_show_temp(struct device *dev,
5157 struct device_attribute *dev_attr,
5158 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305159{
5160 struct be_adapter *adapter = dev_get_drvdata(dev);
5161
5162 /* Unit: millidegree Celsius */
5163 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5164 return -EIO;
5165 else
5166 return sprintf(buf, "%u\n",
5167 adapter->hwmon_info.be_on_die_temp * 1000);
5168}
5169
/* hwmon sysfs plumbing: a single read-only "temp1_input" attribute
 * backed by be_hwmon_show_temp(); the generated be_hwmon_groups is
 * registered from be_probe() via
 * devm_hwmon_device_register_with_groups().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5179
Sathya Perlad3791422012-09-28 04:39:44 +00005180static char *mc_name(struct be_adapter *adapter)
5181{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305182 char *str = ""; /* default */
5183
5184 switch (adapter->mc_type) {
5185 case UMC:
5186 str = "UMC";
5187 break;
5188 case FLEX10:
5189 str = "FLEX10";
5190 break;
5191 case vNIC1:
5192 str = "vNIC-1";
5193 break;
5194 case nPAR:
5195 str = "nPAR";
5196 break;
5197 case UFP:
5198 str = "UFP";
5199 break;
5200 case vNIC2:
5201 str = "vNIC-2";
5202 break;
5203 default:
5204 str = "";
5205 }
5206
5207 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005208}
5209
/* Printable PCI function role for the probe banner: "PF" or "VF". */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5214
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005215static inline char *nic_name(struct pci_dev *pdev)
5216{
5217 switch (pdev->device) {
5218 case OC_DEVICE_ID1:
5219 return OC_NAME;
5220 case OC_DEVICE_ID2:
5221 return OC_NAME_BE;
5222 case OC_DEVICE_ID3:
5223 case OC_DEVICE_ID4:
5224 return OC_NAME_LANCER;
5225 case BE_DEVICE_ID2:
5226 return BE3_NAME;
5227 case OC_DEVICE_ID5:
5228 case OC_DEVICE_ID6:
5229 return OC_NAME_SH;
5230 default:
5231 return BE_NAME;
5232 }
5233}
5234
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005235static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005236{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005237 struct be_adapter *adapter;
5238 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005239 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005240
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305241 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5242
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005243 status = pci_enable_device(pdev);
5244 if (status)
5245 goto do_none;
5246
5247 status = pci_request_regions(pdev, DRV_NAME);
5248 if (status)
5249 goto disable_dev;
5250 pci_set_master(pdev);
5251
Sathya Perla7f640062012-06-05 19:37:20 +00005252 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305253 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005254 status = -ENOMEM;
5255 goto rel_reg;
5256 }
5257 adapter = netdev_priv(netdev);
5258 adapter->pdev = pdev;
5259 pci_set_drvdata(pdev, adapter);
5260 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005261 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005262
Russell King4c15c242013-06-26 23:49:11 +01005263 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005264 if (!status) {
5265 netdev->features |= NETIF_F_HIGHDMA;
5266 } else {
Russell King4c15c242013-06-26 23:49:11 +01005267 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005268 if (status) {
5269 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5270 goto free_netdev;
5271 }
5272 }
5273
Kalesh AP2f951a92014-09-12 17:39:21 +05305274 status = pci_enable_pcie_error_reporting(pdev);
5275 if (!status)
5276 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005277
Sathya Perla78fad34e2015-02-23 04:20:08 -05005278 status = be_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005279 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005280 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005281
Sathya Perla78fad34e2015-02-23 04:20:08 -05005282 status = be_drv_init(adapter);
5283 if (status)
5284 goto unmap_bars;
5285
Sathya Perla5fb379e2009-06-18 00:02:59 +00005286 status = be_setup(adapter);
5287 if (status)
Sathya Perla78fad34e2015-02-23 04:20:08 -05005288 goto drv_cleanup;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005289
Sathya Perla3abcded2010-10-03 22:12:27 -07005290 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005291 status = register_netdev(netdev);
5292 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005293 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005294
Parav Pandit045508a2012-03-26 14:27:13 +00005295 be_roce_dev_add(adapter);
5296
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005297 be_schedule_err_detection(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005298
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305299 /* On Die temperature not supported for VF. */
Arnd Bergmann9a032592015-05-18 23:06:45 +02005300 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305301 adapter->hwmon_info.hwmon_dev =
5302 devm_hwmon_device_register_with_groups(&pdev->dev,
5303 DRV_NAME,
5304 adapter,
5305 be_hwmon_groups);
5306 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5307 }
5308
Sathya Perlad3791422012-09-28 04:39:44 +00005309 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005310 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005311
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005312 return 0;
5313
Sathya Perla5fb379e2009-06-18 00:02:59 +00005314unsetup:
5315 be_clear(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005316drv_cleanup:
5317 be_drv_cleanup(adapter);
5318unmap_bars:
5319 be_unmap_pci_bars(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005320free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005321 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005322rel_reg:
5323 pci_release_regions(pdev);
5324disable_dev:
5325 pci_disable_device(pdev);
5326do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005327 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005328 return status;
5329}
5330
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, mask
 * interrupts, stop the error-detection work, quiesce/tear down the HW
 * (be_cleanup) and put the PCI device into the requested low-power
 * state. Reversed by be_pci_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5348
Kalesh AP484d76f2015-02-23 04:20:14 -05005349static int be_pci_resume(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005350{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005351 struct be_adapter *adapter = pci_get_drvdata(pdev);
Kalesh AP484d76f2015-02-23 04:20:14 -05005352 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005353
5354 status = pci_enable_device(pdev);
5355 if (status)
5356 return status;
5357
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005358 pci_restore_state(pdev);
5359
Kalesh AP484d76f2015-02-23 04:20:14 -05005360 status = be_resume(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005361 if (status)
5362 return status;
5363
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005364 be_schedule_err_detection(adapter);
5365
Suresh Reddy76a9e082014-01-15 13:23:40 +05305366 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005367 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005368
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005369 return 0;
5370}
5371
Sathya Perla82456b02010-02-17 01:35:37 +00005372/*
5373 * An FLR will stop BE from DMAing any data.
5374 */
5375static void be_shutdown(struct pci_dev *pdev)
5376{
5377 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005378
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005379 if (!adapter)
5380 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005381
Devesh Sharmad114f992014-06-10 19:32:15 +05305382 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005383 cancel_delayed_work_sync(&adapter->work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005384 be_cancel_err_detection(adapter);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005385
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005386 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005387
Ajit Khaparde57841862011-04-06 18:08:43 +00005388 be_cmd_reset_function(adapter);
5389
Sathya Perla82456b02010-02-17 01:35:37 +00005390 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005391}
5392
Sathya Perlacf588472010-02-14 21:22:01 +00005393static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305394 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005395{
5396 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005397
5398 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5399
Venkata Duvvuru954f6822015-05-13 13:00:13 +05305400 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5401 be_set_error(adapter, BE_ERROR_EEH);
Sathya Perlacf588472010-02-14 21:22:01 +00005402
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005403 be_cancel_err_detection(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005404
Kalesh AP87ac1a52015-02-23 04:20:15 -05005405 be_cleanup(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005406 }
Sathya Perlacf588472010-02-14 21:22:01 +00005407
5408 if (state == pci_channel_io_perm_failure)
5409 return PCI_ERS_RESULT_DISCONNECT;
5410
5411 pci_disable_device(pdev);
5412
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005413 /* The error could cause the FW to trigger a flash debug dump.
5414 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005415 * can cause it not to recover; wait for it to finish.
5416 * Wait only for first function as it is needed only once per
5417 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005418 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005419 if (pdev->devfn == 0)
5420 ssleep(30);
5421
Sathya Perlacf588472010-02-14 21:22:01 +00005422 return PCI_ERS_RESULT_NEED_RESET;
5423}
5424
/* AER/EEH slot_reset handler: re-enable the device after the slot
 * reset, wait for FW readiness, clear the AER status and the driver's
 * error flags. Returns RECOVERED so the core proceeds to
 * be_eeh_resume(), or DISCONNECT if the device/FW did not come back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5450
/* AER/EEH resume handler, called after a successful slot reset:
 * re-save config space, rebuild driver/HW state via be_resume() and
 * restart error detection. On failure only a message is logged — the
 * device stays down.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5469
Vasundhara Volamace40af2015-03-04 00:44:34 -05005470static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5471{
5472 struct be_adapter *adapter = pci_get_drvdata(pdev);
5473 u16 num_vf_qs;
5474 int status;
5475
5476 if (!num_vfs)
5477 be_vf_clear(adapter);
5478
5479 adapter->num_vfs = num_vfs;
5480
5481 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5482 dev_warn(&pdev->dev,
5483 "Cannot disable VFs while they are assigned\n");
5484 return -EBUSY;
5485 }
5486
5487 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5488 * are equally distributed across the max-number of VFs. The user may
5489 * request only a subset of the max-vfs to be enabled.
5490 * Based on num_vfs, redistribute the resources across num_vfs so that
5491 * each VF will have access to more number of resources.
5492 * This facility is not available in BE3 FW.
5493 * Also, this is done by FW in Lancer chip.
5494 */
5495 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5496 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5497 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5498 adapter->num_vfs, num_vf_qs);
5499 if (status)
5500 dev_err(&pdev->dev,
5501 "Failed to optimize SR-IOV resources\n");
5502 }
5503
5504 status = be_get_resources(adapter);
5505 if (status)
5506 return be_cmd_status(status);
5507
5508 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5509 rtnl_lock();
5510 status = be_update_queues(adapter);
5511 rtnl_unlock();
5512 if (status)
5513 return be_cmd_status(status);
5514
5515 if (adapter->num_vfs)
5516 status = be_vf_setup(adapter);
5517
5518 if (!status)
5519 return adapter->num_vfs;
5520
5521 return 0;
5522}
5523
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07005524static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00005525 .error_detected = be_eeh_err_detected,
5526 .slot_reset = be_eeh_reset,
5527 .resume = be_eeh_resume,
5528};
5529
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005530static struct pci_driver be_driver = {
5531 .name = DRV_NAME,
5532 .id_table = be_dev_ids,
5533 .probe = be_probe,
5534 .remove = be_remove,
5535 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05005536 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005537 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05005538 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00005539 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005540};
5541
5542static int __init be_init_module(void)
5543{
Joe Perches8e95a202009-12-03 07:58:21 +00005544 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5545 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005546 printk(KERN_WARNING DRV_NAME
5547 " : Module param rx_frag_size must be 2048/4096/8192."
5548 " Using 2048\n");
5549 rx_frag_size = 2048;
5550 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005551
Vasundhara Volamace40af2015-03-04 00:44:34 -05005552 if (num_vfs > 0) {
5553 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5554 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5555 }
5556
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005557 return pci_register_driver(&be_driver);
5558}
5559module_init(be_init_module);
5560
/* Module exit: unregister the PCI driver; per-device teardown happens
 * through be_remove().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);