/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of the fragments that received data is placed into; read-only
 * after load (S_IRUGO).
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI device IDs claimed by this driver */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* One name per bit position of the register; presumably used to report
 * which HW block raised an unrecoverable error — confirm against the
 * users of this table elsewhere in the file.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
Ajit Khaparde7c185272010-07-29 06:16:33 +000092/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070093static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000094 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "ECRC",
116 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700117 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000125 "Unknown"
126};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * membar-control register in PCI config space. Used as the fallback
 * path when the FW command in be_intr_set() fails.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state; skip the write */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
/* Ring the RX-queue doorbell: tell HW that @posted new entries are
 * available in rx-queue @qid. No-op once a HW error has been flagged.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* make prior queue-entry writes visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
203
/* Ring the TX-queue doorbell for @txo: tell HW that @posted new entries
 * have been made available. No-op once a HW error has been flagged.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* make prior queue-entry writes visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
218
/* Notify HW that @num_popped entries of event-queue @qid were processed.
 * @arm: re-arm the EQ for further events
 * @clear_int: clear the pending interrupt
 * @eq_delay_mult_enc: encoded R2I delay multiplier written to the doorbell
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the doorbell write once a HW error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
240
/* Notify HW that @num_popped entries of completion-queue @qid were
 * processed; optionally re-arm the CQ (@arm). Non-static: also used by
 * other files of this driver.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	/* skip the doorbell write once a HW error has been flagged */
	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
257
/* Set a new MAC address on the interface.
 * The new MAC is programmed via FW cmds and netdev->dev_addr is updated
 * only after the FW confirms the MAC is active; tolerates the benign
 * PMAC_ADD/PMAC_DEL failures that occur when the PF has already
 * provisioned the MAC for a VF.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
322
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323/* BE2 supports only v0 cmd */
324static void *hw_stats_from_cmd(struct be_adapter *adapter)
325{
326 if (BE2_chip(adapter)) {
327 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000331 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else {
335 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
336
337 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000338 }
339}
340
341/* BE2 supports only v0 cmd */
342static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
343{
344 if (BE2_chip(adapter)) {
345 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000349 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500352 } else {
353 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
354
355 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000356 }
357}
358
359static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000361 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
362 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
363 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000364 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000365 &rxf_stats->port[adapter->port_num];
366 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000367
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_pause_frames = port_stats->rx_pause_frames;
370 drvs->rx_crc_errors = port_stats->rx_crc_errors;
371 drvs->rx_control_frames = port_stats->rx_control_frames;
372 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
373 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
374 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
375 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
376 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
377 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
378 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
379 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
380 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
381 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
382 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000383 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 drvs->rx_dropped_header_too_small =
385 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000386 drvs->rx_address_filtered =
387 port_stats->rx_address_filtered +
388 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_alignment_symbol_errors =
390 port_stats->rx_alignment_symbol_errors;
391
392 drvs->tx_pauseframes = port_stats->tx_pauseframes;
393 drvs->tx_controlframes = port_stats->tx_controlframes;
394
395 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000400 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 drvs->forwarded_packets = rxf_stats->forwarded_packets;
402 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
404 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
406}
407
Sathya Perlaca34fe32012-11-06 17:48:56 +0000408static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
411 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
412 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000413 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000414 &rxf_stats->port[adapter->port_num];
415 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416
Sathya Perlaac124ff2011-07-25 19:10:14 +0000417 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000418 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
419 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 drvs->rx_pause_frames = port_stats->rx_pause_frames;
421 drvs->rx_crc_errors = port_stats->rx_crc_errors;
422 drvs->rx_control_frames = port_stats->rx_control_frames;
423 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
424 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
425 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
426 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
427 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
428 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
429 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
430 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
431 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
432 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
433 drvs->rx_dropped_header_too_small =
434 port_stats->rx_dropped_header_too_small;
435 drvs->rx_input_fifo_overflow_drop =
436 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000437 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000438 drvs->rx_alignment_symbol_errors =
439 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000440 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000441 drvs->tx_pauseframes = port_stats->tx_pauseframes;
442 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000443 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 drvs->jabber_events = port_stats->jabber_events;
445 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000447 drvs->forwarded_packets = rxf_stats->forwarded_packets;
448 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000449 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
450 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000451 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
452}
453
Ajit Khaparde61000862013-10-03 16:16:33 -0500454static void populate_be_v2_stats(struct be_adapter *adapter)
455{
456 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
457 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
458 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
459 struct be_port_rxf_stats_v2 *port_stats =
460 &rxf_stats->port[adapter->port_num];
461 struct be_drv_stats *drvs = &adapter->drv_stats;
462
463 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
464 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
465 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
466 drvs->rx_pause_frames = port_stats->rx_pause_frames;
467 drvs->rx_crc_errors = port_stats->rx_crc_errors;
468 drvs->rx_control_frames = port_stats->rx_control_frames;
469 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
470 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
471 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
472 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
473 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
474 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
475 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
476 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
477 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
478 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
479 drvs->rx_dropped_header_too_small =
480 port_stats->rx_dropped_header_too_small;
481 drvs->rx_input_fifo_overflow_drop =
482 port_stats->rx_input_fifo_overflow_drop;
483 drvs->rx_address_filtered = port_stats->rx_address_filtered;
484 drvs->rx_alignment_symbol_errors =
485 port_stats->rx_alignment_symbol_errors;
486 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
487 drvs->tx_pauseframes = port_stats->tx_pauseframes;
488 drvs->tx_controlframes = port_stats->tx_controlframes;
489 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
490 drvs->jabber_events = port_stats->jabber_events;
491 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
492 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
493 drvs->forwarded_packets = rxf_stats->forwarded_packets;
494 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
495 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
496 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
497 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530498 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500499 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
500 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
501 drvs->rx_roce_frames = port_stats->roce_frames_received;
502 drvs->roce_drops_crc = port_stats->roce_drops_crc;
503 drvs->roce_drops_payload_len =
504 port_stats->roce_drops_payload_len;
505 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500506}
507
/* Copy the Lancer pport stats into the chip-agnostic driver stats block */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	/* stats arrive from FW in little-endian; convert in place first */
	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	/* the same HW counter feeds both fifo-overflow driver stats below */
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000544
Sathya Perla09c1c682011-08-22 19:41:53 +0000545static void accumulate_16bit_val(u32 *acc, u16 val)
546{
547#define lo(x) (x & 0xFFFF)
548#define hi(x) (x & 0xFFFF0000)
549 bool wrapped = val < lo(*acc);
550 u32 newacc = hi(*acc) + val;
551
552 if (wrapped)
553 newacc += 65536;
554 ACCESS_ONCE(*acc) = newacc;
555}
556
Jingoo Han4188e7d2013-08-05 18:02:02 +0900557static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530558 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559{
560 if (!BEx_chip(adapter))
561 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
562 else
563 /* below erx HW counter can actually wrap around after
564 * 65535. Driver accumulates a 32-bit value
565 */
566 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
567 (u16)erx_stat);
568}
569
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000570void be_parse_stats(struct be_adapter *adapter)
571{
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573 struct be_rx_obj *rxo;
574 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000575 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000576
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 if (lancer_chip(adapter)) {
578 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000579 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000580 if (BE2_chip(adapter))
581 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500582 else if (BE3_chip(adapter))
583 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000584 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500585 else
586 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000587
Ajit Khaparde61000862013-10-03 16:16:33 -0500588 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000589 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000590 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
591 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000592 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000593 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000594}
595
/* ndo_get_stats64 handler: aggregates per-RX/TX-queue software counters and
 * the firmware-derived driver stats into @stats. Returns @stats itself, as
 * the ndo contract requires.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* u64_stats seqcount loop: retry the snapshot if a writer
		 * updated the counters while we were reading them.
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		/* same seqcount-protected snapshot for the TX counters */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666 struct net_device *netdev = adapter->netdev;
667
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000669 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000672
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530673 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000674 netif_carrier_on(netdev);
675 else
676 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200677
678 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
/* Account a transmitted skb in the per-TX-queue software stats */
static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
{
	struct be_tx_stats *stats = tx_stats(txo);
	/* a GSO skb counts as gso_segs wire packets; otherwise as one */
	u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;

	u64_stats_update_begin(&stats->sync);
	stats->tx_reqs++;
	stats->tx_bytes += skb->len;
	stats->tx_pkts += tx_pkts;
	/* encapsulated pkts with HW csum requested count as VXLAN-offloaded */
	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
		stats->tx_vxlan_offload_pkts += tx_pkts;
	u64_stats_update_end(&stats->sync);
}
694
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500695/* Returns number of WRBs needed for the skb */
696static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500698 /* +1 for the header wrb */
699 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500704 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
705 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
706 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
707 wrb->rsvd0 = 0;
708}
709
710/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
711 * to avoid the swap and shift/mask operations in wrb_fill().
712 */
713static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
714{
715 wrb->frag_pa_hi = 0;
716 wrb->frag_pa_lo = 0;
717 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000718 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719}
720
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000721static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530722 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000723{
724 u8 vlan_prio;
725 u16 vlan_tag;
726
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100727 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000728 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
729 /* If vlan priority provided by OS is NOT in available bmap */
730 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
731 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500732 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000733
734 return vlan_tag;
735}
736
Sathya Perlac9c47142014-03-27 10:46:19 +0530737/* Used only for IP tunnel packets */
738static u16 skb_inner_ip_proto(struct sk_buff *skb)
739{
740 return (inner_ip_hdr(skb)->version == 4) ?
741 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
742}
743
744static u16 skb_ip_proto(struct sk_buff *skb)
745{
746 return (ip_hdr(skb)->version == 4) ?
747 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
748}
749
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530750static inline bool be_is_txq_full(struct be_tx_obj *txo)
751{
752 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
753}
754
755static inline bool be_can_txq_wake(struct be_tx_obj *txo)
756{
757 return atomic_read(&txo->q.used) < txo->q.len / 2;
758}
759
760static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
761{
762 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
763}
764
/* Inspect the skb and record the requested TX offload features
 * (LSO, checksum, vlan) into @wrb_params for later programming
 * into the WRB header.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* LSO6 is flagged separately, but not on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunnelled pkt: csum decision uses the inner proto */
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* CRC is always requested */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500796
/* Translate the feature flags gathered in @wrb_params into the bit
 * fields of the TX header WRB for this skb.
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes a copy of the pkt to the BMC (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
833
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000834static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530835 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000836{
837 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000839
Sathya Perla7101e112010-03-22 20:41:12 +0000840
Sathya Perlaf986afc2015-02-06 08:18:43 -0500841 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
842 (u64)le32_to_cpu(wrb->frag_pa_lo);
843 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000844 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500845 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000846 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500847 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000848 }
849}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530851/* Grab a WRB header for xmit */
852static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530854 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530856 queue_head_inc(&txo->q);
857 return head;
858}
859
/* Set up the WRB header for xmit: fill the hdr wrb reserved at @head,
 * remember the skb for completion processing and account the wrbs used.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* the hdr wrb is consumed by HW in little-endian layout */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* slot must be free; skb is kept for tx-completion cleanup */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530881/* Setup a WRB fragment (buffer descriptor) for xmit */
882static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
883 int len)
884{
885 struct be_eth_wrb *wrb;
886 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700887
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530888 wrb = queue_head_node(txq);
889 wrb_fill(wrb, busaddr, len);
890 queue_head_inc(txq);
891}
892
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the hdr wrb so we can walk the frag wrbs again */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first frag may have been dma_map_single()'d */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index; the walk above advanced it again */
	txq->head = head;
}
920
921/* Enqueue the given packet for transmit. This routine allocates WRBs for the
922 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
923 * of WRBs used up by the packet.
924 */
925static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
926 struct sk_buff *skb,
927 struct be_wrb_params *wrb_params)
928{
929 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
930 struct device *dev = &adapter->pdev->dev;
931 struct be_queue_info *txq = &txo->q;
932 bool map_single = false;
933 u16 head = txq->head;
934 dma_addr_t busaddr;
935 int len;
936
937 head = be_tx_get_wrb_hdr(txo);
938
939 if (skb->len > skb->data_len) {
940 len = skb_headlen(skb);
941
942 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
943 if (dma_mapping_error(dev, busaddr))
944 goto dma_err;
945 map_single = true;
946 be_tx_setup_wrb_frag(txo, busaddr, len);
947 copied += len;
948 }
949
950 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
951 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
952 len = skb_frag_size(frag);
953
954 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
955 if (dma_mapping_error(dev, busaddr))
956 goto dma_err;
957 be_tx_setup_wrb_frag(txo, busaddr, len);
958 copied += len;
959 }
960
961 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
962
963 be_tx_stats_update(txo, skb);
964 return wrb_cnt;
965
966dma_err:
967 adapter->drv_stats.dma_map_errors++;
968 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000969 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700970}
971
/* Returns non-zero once the QnQ async event flag has been set */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
976
Somnath Kotur93040ae2012-06-26 22:32:10 +0000977static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000978 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530979 struct be_wrb_params
980 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000981{
982 u16 vlan_tag = 0;
983
984 skb = skb_share_check(skb, GFP_ATOMIC);
985 if (unlikely(!skb))
986 return skb;
987
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100988 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000989 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530990
991 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
992 if (!vlan_tag)
993 vlan_tag = adapter->pvid;
994 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
995 * skip VLAN insertion
996 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530997 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530998 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000999
1000 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001001 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1002 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001003 if (unlikely(!skb))
1004 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001005 skb->vlan_tci = 0;
1006 }
1007
1008 /* Insert the outer VLAN, if any */
1009 if (adapter->qnq_vid) {
1010 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001011 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1012 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001013 if (unlikely(!skb))
1014 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301015 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001016 }
1017
Somnath Kotur93040ae2012-06-26 22:32:10 +00001018 return skb;
1019}
1020
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001021static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1022{
1023 struct ethhdr *eh = (struct ethhdr *)skb->data;
1024 u16 offset = ETH_HLEN;
1025
1026 if (eh->h_proto == htons(ETH_P_IPV6)) {
1027 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1028
1029 offset += sizeof(struct ipv6hdr);
1030 if (ip6h->nexthdr != NEXTHDR_TCP &&
1031 ip6h->nexthdr != NEXTHDR_UDP) {
1032 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301033 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001034
1035 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1036 if (ehdr->hdrlen == 0xff)
1037 return true;
1038 }
1039 }
1040 return false;
1041}
1042
1043static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1044{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001045 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001046}
1047
Sathya Perla748b5392014-05-09 13:29:13 +05301048static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001049{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001050 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051}
1052
/* Apply the BEx/Lancer TX errata workarounds to the skb. Returns the
 * (possibly reallocated) skb, or NULL if the pkt was dropped or a vlan
 * insertion failed. Ownership of a dropped skb stays with this function.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the pkt back to the length claimed by its
	 * IP header so no padding remains.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1121
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1123 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301124 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301125{
Suresh Reddy8227e992015-10-12 03:47:19 -04001126 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1127 * packets that are 32b or less may cause a transmit stall
1128 * on that port. The workaround is to pad such packets
1129 * (len <= 32 bytes) to a minimum length of 36b.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301130 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001131 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001132 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301133 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301134 }
1135
1136 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301137 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301138 if (!skb)
1139 return NULL;
1140 }
1141
1142 return skb;
1143}
1144
/* Notify HW of all wrbs queued since the last flush. Ensures the final
 * request raises a completion event, and pads the batch with a dummy wrb
 * when an odd number of wrbs would otherwise be rung (non-Lancer).
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* patch the last req's num_wrb field to include the dummy */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1168
/* OS2BMC related */

/* Well-known UDP ports whose traffic may be forwarded to the BMC */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

/* NOTE: these are function-like macros that evaluate their arguments
 * more than once; pass only simple lvalues (as be_send_pkt_to_bmc() does).
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* NOTE(review): compare_ether_addr() was deprecated in favor of
 * ether_addr_equal() upstream — confirm against the target kernel.
 */
#define is_broadcast_packet(eh, adapter)	\
		(is_multicast_ether_addr(eh->h_dest) && \
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

/* Per-class filter bits reported by FW in adapter->bmc_filt_mask */
#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1222
/* Decide whether this TX pkt must also be delivered to the BMC, based on
 * the FW-reported filter mask. Returns true if so; in that case *skb may
 * be replaced (vlan is inlined for the BMC copy).
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* only multicast/broadcast frames are candidates */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 neighbour-discovery advertisements per the filter mask */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS over UDP per the filter mask */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1292
/* ndo_start_xmit handler: apply errata workarounds, enqueue the pkt (twice
 * when a BMC copy is needed), manage subqueue flow-control and ring the
 * doorbell. Always returns NETDEV_TX_OK; dropped skbs are freed here or by
 * the helpers.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* honor xmit_more: defer the doorbell while more pkts are coming */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	/* may drop (returns NULL, skb already freed) or realloc the skb */
	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);	/* extra ref for the 2nd enqueue */
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1343
1344static int be_change_mtu(struct net_device *netdev, int new_mtu)
1345{
1346 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301347 struct device *dev = &adapter->pdev->dev;
1348
1349 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1350 dev_info(dev, "MTU must be between %d and %d bytes\n",
1351 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352 return -EINVAL;
1353 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301354
1355 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301356 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001357 netdev->mtu = new_mtu;
1358 return 0;
1359}
1360
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001361static inline bool be_in_all_promisc(struct be_adapter *adapter)
1362{
1363 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1364 BE_IF_FLAGS_ALL_PROMISCUOUS;
1365}
1366
1367static int be_set_vlan_promisc(struct be_adapter *adapter)
1368{
1369 struct device *dev = &adapter->pdev->dev;
1370 int status;
1371
1372 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1373 return 0;
1374
1375 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1376 if (!status) {
1377 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1378 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1379 } else {
1380 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1381 }
1382 return status;
1383}
1384
1385static int be_clear_vlan_promisc(struct be_adapter *adapter)
1386{
1387 struct device *dev = &adapter->pdev->dev;
1388 int status;
1389
1390 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1391 if (!status) {
1392 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1393 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1394 }
1395 return status;
1396}
1397
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Re-programs the HW VLAN filter table from adapter->vids; callers are
 * expected to have updated the bitmap and vlans_added count beforehand.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filtering now works; drop VLAN promisc mode if it was on */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1433
/* ndo_vlan_rx_add_vid handler: record @vid in the driver bitmap and
 * re-program the HW VLAN filter; rolls the bookkeeping back on failure.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* Already programmed; nothing to do */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* Undo the bitmap/count update so SW state matches HW */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1457
Patrick McHardy80d5c362013-04-19 02:04:28 +00001458static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001459{
1460 struct be_adapter *adapter = netdev_priv(netdev);
1461
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001462 /* Packets with VID 0 are always received by Lancer by default */
1463 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301464 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001465
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301466 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301467 adapter->vlans_added--;
1468
1469 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001470}
1471
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001472static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301473{
Sathya Perlaac34b742015-02-06 08:18:40 -05001474 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001475 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1476}
1477
1478static void be_set_all_promisc(struct be_adapter *adapter)
1479{
1480 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1481 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1482}
1483
1484static void be_set_mc_promisc(struct be_adapter *adapter)
1485{
1486 int status;
1487
1488 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1489 return;
1490
1491 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1492 if (!status)
1493 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1494}
1495
1496static void be_set_mc_list(struct be_adapter *adapter)
1497{
1498 int status;
1499
1500 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1501 if (!status)
1502 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1503 else
1504 be_set_mc_promisc(adapter);
1505}
1506
/* Re-program the HW unicast MAC list from the netdev UC address list.
 * Slot 0 of pmac_id[] is reserved for the primary MAC; UC entries occupy
 * slots 1..uc_macs. Falls back to all-promiscuous mode when the netdev
 * list exceeds what the interface supports.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* First delete all previously programmed UC MACs */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1527
1528static void be_clear_uc_list(struct be_adapter *adapter)
1529{
1530 int i;
1531
1532 for (i = 1; i < (adapter->uc_macs + 1); i++)
1533 be_cmd_pmac_del(adapter, adapter->if_handle,
1534 adapter->pmac_id[i], 0);
1535 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301536}
1537
/* ndo_set_rx_mode handler: sync HW RX filtering (promiscuous state,
 * multicast and unicast lists) with the net_device flags/address lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Re-program VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Only re-program the UC MAC list when its length changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1566
/* ndo_set_vf_mac handler: program @mac as VF @vf's MAC address.
 * Returns -EPERM when SR-IOV is disabled, -EINVAL for an invalid MAC or
 * VF index, 0 when the MAC is unchanged or successfully applied.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	/* BEx chips change a VF MAC via pmac del+add; newer chips have a
	 * dedicated set-MAC command.
	 */
	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC only after FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1606
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001607static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301608 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001609{
1610 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001611 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001612
Sathya Perla11ac75e2011-12-13 00:58:50 +00001613 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001614 return -EPERM;
1615
Sathya Perla11ac75e2011-12-13 00:58:50 +00001616 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001617 return -EINVAL;
1618
1619 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001620 vi->max_tx_rate = vf_cfg->tx_rate;
1621 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001622 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1623 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001624 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301625 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001626 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001627
1628 return 0;
1629}
1630
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * Also clears any guest-programmed VLAN filters and revokes the VF's
 * filter-management privilege so the guest cannot override the setting.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1659
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * privilege to program its own VLAN filters.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1686
Sathya Perla748b5392014-05-09 13:29:13 +05301687static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001688{
1689 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001690 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001691 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001692
Sathya Perla11ac75e2011-12-13 00:58:50 +00001693 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001694 return -EPERM;
1695
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001696 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001697 return -EINVAL;
1698
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001699 if (vlan || qos) {
1700 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001701 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001702 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001703 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001704 }
1705
Kalesh APabccf232014-07-17 16:20:24 +05301706 if (status) {
1707 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001708 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1709 status);
Kalesh APabccf232014-07-17 16:20:24 +05301710 return be_cmd_status(status);
1711 }
1712
1713 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301714 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001715}
1716
/* ndo_set_vf_rate handler: cap VF @vf's TX rate at @max_tx_rate Mbps.
 * A minimum rate is not supported by this HW; max_tx_rate == 0 removes
 * the cap (no link-speed validation needed in that case).
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* HW cannot enforce a minimum rate */
	if (min_tx_rate)
		return -EINVAL;

	/* 0 == uncapped; skip the link-speed checks below */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the applied rate for be_get_vf_config() reporting */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301778
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301779static int be_set_vf_link_state(struct net_device *netdev, int vf,
1780 int link_state)
1781{
1782 struct be_adapter *adapter = netdev_priv(netdev);
1783 int status;
1784
1785 if (!sriov_enabled(adapter))
1786 return -EPERM;
1787
1788 if (vf >= adapter->num_vfs)
1789 return -EINVAL;
1790
1791 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301792 if (status) {
1793 dev_err(&adapter->pdev->dev,
1794 "Link state change on VF %d failed: %#x\n", vf, status);
1795 return be_cmd_status(status);
1796 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301797
Kalesh APabccf232014-07-17 16:20:24 +05301798 adapter->vf_cfg[vf].plink_tracking = link_state;
1799
1800 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301801}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001802
Kalesh APe7bcbd72015-05-06 05:30:32 -04001803static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1804{
1805 struct be_adapter *adapter = netdev_priv(netdev);
1806 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1807 u8 spoofchk;
1808 int status;
1809
1810 if (!sriov_enabled(adapter))
1811 return -EPERM;
1812
1813 if (vf >= adapter->num_vfs)
1814 return -EINVAL;
1815
1816 if (BEx_chip(adapter))
1817 return -EOPNOTSUPP;
1818
1819 if (enable == vf_cfg->spoofchk)
1820 return 0;
1821
1822 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1823
1824 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1825 0, spoofchk);
1826 if (status) {
1827 dev_err(&adapter->pdev->dev,
1828 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1829 return be_cmd_status(status);
1830 }
1831
1832 vf_cfg->spoofchk = enable;
1833 return 0;
1834}
1835
Sathya Perla2632baf2013-10-01 16:00:00 +05301836static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1837 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001838{
Sathya Perla2632baf2013-10-01 16:00:00 +05301839 aic->rx_pkts_prev = rx_pkts;
1840 aic->tx_reqs_prev = tx_pkts;
1841 aic->jiffies = now;
1842}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001843
/* Compute a new EQ-delay (interrupt moderation) value for @eqo from the
 * RX+TX packet rate seen since the last sample. Returns the ethtool-set
 * static delay when adaptive moderation is disabled.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* Adaptive mode off: reset sample time, use fixed delay */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum packet counts over all queues on this EQ; the fetch/retry
	 * loops re-read until a consistent u64 snapshot is obtained.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* Combined RX+TX packets-per-second over the sample interval */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	/* Clamp to the configured moderation range */
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1904
1905/* For Skyhawk-R only */
1906static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1907{
1908 struct be_adapter *adapter = eqo->adapter;
1909 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1910 ulong now = jiffies;
1911 int eqd;
1912 u32 mult_enc;
1913
1914 if (!aic->enable)
1915 return 0;
1916
1917 if (time_before_eq(now, aic->jiffies) ||
1918 jiffies_to_msecs(now - aic->jiffies) < 1)
1919 eqd = aic->prev_eqd;
1920 else
1921 eqd = be_get_new_eqd(eqo);
1922
1923 if (eqd > 100)
1924 mult_enc = R2I_DLY_ENC_1;
1925 else if (eqd > 60)
1926 mult_enc = R2I_DLY_ENC_2;
1927 else if (eqd > 20)
1928 mult_enc = R2I_DLY_ENC_3;
1929 else
1930 mult_enc = R2I_DLY_ENC_0;
1931
1932 aic->prev_eqd = eqd;
1933
1934 return mult_enc;
1935}
1936
/* Recompute the EQ delay for every event queue and push all values that
 * changed (or all, when @force_update) to FW in a single command.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* Scale eqd into the multiplier form the FW
			 * command expects.
			 */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1958
/* Account one RX completion in the per-queue stats. Updates are bracketed
 * by the u64_stats sync so readers get consistent 64-bit counters.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1976
Sathya Perla2e588f82011-03-11 02:49:26 +00001977static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001978{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001979 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301980 * Also ignore ipcksm for ipv6 pkts
1981 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001982 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301983 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001984}
1985
/* Pop the RX page fragment at the queue tail and make its data visible
 * to the CPU. An entry flagged last_frag unmaps the whole big page;
 * other entries only sync their rx_frag_size slice for CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the ring slot */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2011
2012/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002013static void be_rx_compl_discard(struct be_rx_obj *rxo,
2014 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002015{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002016 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002017 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002018
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002019 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302020 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002021 put_page(page_info->page);
2022 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002023 }
2024}
2025
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first fragment is copied (fully for tiny packets, header-only
 * otherwise) into the skb's linear area; all remaining fragments are
 * attached as page frags. Fragments that share a physical page are
 * coalesced into a single frag slot. Consumed page_info slots are
 * released from the RX queue via get_rx_page_info().
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet; nothing more to attach */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: the skb already holds
			 * a reference, so drop this fragment's reference.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2100
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the posted RX fragments, set checksum/
 * hash/vlan metadata and hand it to the stack. On skb allocation
 * failure the fragments are discarded and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Claim checksum offload only if the device is configured for it
	 * and the completion reports the checksums as valid.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	/* RX queue index = offset of this rxo in the adapter's array */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2136
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the posted RX fragments directly to a napi-provided skb
 * (coalescing frags that share a physical page) and feed it to GRO.
 * On failure to get an skb the fragments are discarded.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (u16 wrap) so the first iteration's j++ yields
	 * frag slot 0.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* skb already references this page; drop the extra */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	/* RX queue index = offset of this rxo in the adapter's array */
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2193
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002194static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2195 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002196{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302197 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2198 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2199 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2200 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2201 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2202 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2203 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2204 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2205 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2206 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2207 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002208 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302209 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2210 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002211 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302212 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302213 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302214 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002215}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002216
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002217static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2218 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002219{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302220 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2221 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2222 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2223 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2224 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2225 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2226 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2227 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2228 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2229 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2230 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002231 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302232 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2233 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002234 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302235 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2236 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002237}
2238
/* Fetch the next valid RX completion from the RX CQ, parse it into
 * rxo->rxcp and advance the CQ tail. Returns NULL when no completion
 * is pending. Also applies vlan-tag fixups (QnQ transparent tagging,
 * byte-swap on non-Lancer chips, pvid filtering).
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not reliable for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag unless the VID was explicitly added */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2283
Eric Dumazet1829b082011-03-01 05:48:12 +00002284static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002287
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002289 gfp |= __GFP_COMP;
2290 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002291}
2292
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Up to @frags_needed descriptors are posted, stopping early on
 * allocation/DMA-mapping failure or when the next page_info slot is
 * still in use. Each big page is DMA-mapped once; successive frags
 * share it via get_page(). The frag that carries the page's DMA
 * mapping for later unmap is tagged with last_frag. Posted counts are
 * rung to HW in batches of at most MAX_NUM_POST_ERX_DB.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and map it for DMA */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in bounded batches */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2375
/* Fetch the next valid TX completion from the TX CQ, decode it into
 * txo->txcp and advance the CQ tail. Returns NULL when no completion
 * is pending.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so the entry is not re-consumed */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2396
/* Walk the TX queue from its tail up to and including @last_index,
 * unmapping each wrb and freeing the skbs that were transmitted.
 * An skb's slot in sent_skb_list marks its header wrb; the skb is
 * consumed once the next skb (or the end of the walk) is reached.
 * Returns the number of wrbs processed so the caller can adjust
 * txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* Unmap the header only for the first frag wrb of an skb
		 * that actually has linear data.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the last skb of the walk */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2430
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002431/* Return the number of events in the event queue */
2432static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002433{
2434 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002435 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002436
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002437 do {
2438 eqe = queue_tail_node(&eqo->q);
2439 if (eqe->evt == 0)
2440 break;
2441
2442 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002443 eqe->evt = 0;
2444 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002445 queue_tail_inc(&eqo->q);
2446 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002447
2448 return num;
2449}
2450
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002451/* Leaves the EQ is disarmed state */
2452static void be_eq_clean(struct be_eq_obj *eqo)
2453{
2454 int num = events_get(eqo);
2455
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002456 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002457}
2458
Kalesh AP99b44302015-08-05 03:27:49 -04002459/* Free posted rx buffers that were not used */
2460static void be_rxq_clean(struct be_rx_obj *rxo)
2461{
2462 struct be_queue_info *rxq = &rxo->q;
2463 struct be_rx_page_info *page_info;
2464
2465 while (atomic_read(&rxq->used) > 0) {
2466 page_info = get_rx_page_info(rxo);
2467 put_page(page_info->page);
2468 memset(page_info, 0, sizeof(*page_info));
2469 }
2470 BUG_ON(atomic_read(&rxq->used));
2471 rxq->tail = 0;
2472 rxq->head = 0;
2473}
2474
/* Drain the RX completion queue, discarding all pending completions,
 * and wait (non-Lancer chips only, bounded at ~50ms) for the HW flush
 * completion. Leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50 x 1ms waits or on a HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2514
/* Drain all TX completion queues, then reclaim any TX requests that
 * were enqueued but never notified to HW, resetting the TXQ indices
 * so queue state matches what HW last saw.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart the 10ms clock */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2580
/* Tear down all event queues: drain and destroy each created EQ,
 * remove its NAPI context, release its CPU affinity mask, and free
 * the queue memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		/* queue memory is freed even if the EQ was never created */
		be_queue_free(adapter, &eqo->q);
	}
}
2597
/* Create the event queues: allocate and create each EQ in HW, set up
 * adaptive interrupt coalescing defaults, a NUMA-local CPU affinity
 * mask and a NAPI context per EQ. Returns 0 on success or a negative
 * errno; on failure, cleanup is left to the caller's teardown path.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* One EQ per IRQ, capped by the configured queue count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs across CPUs local to the device's NUMA node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2636
Sathya Perla5fb379e2009-06-18 00:02:59 +00002637static void be_mcc_queues_destroy(struct be_adapter *adapter)
2638{
2639 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002640
Sathya Perla8788fdc2009-07-27 22:52:03 +00002641 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002642 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002643 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002644 be_queue_free(adapter, q);
2645
Sathya Perla8788fdc2009-07-27 22:52:03 +00002646 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002647 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002648 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002649 be_queue_free(adapter, q);
2650}
2651
/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC completion queue and the MCC queue itself, unwinding in
 * reverse order on any failure. Returns 0 on success, -1 on failure
 * (callers only test for non-zero).
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: undo each step in reverse order of creation */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2684
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002685static void be_tx_queues_destroy(struct be_adapter *adapter)
2686{
2687 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002688 struct be_tx_obj *txo;
2689 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002690
Sathya Perla3c8def92011-06-12 20:01:58 +00002691 for_all_tx_queues(adapter, txo, i) {
2692 q = &txo->q;
2693 if (q->created)
2694 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2695 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002696
Sathya Perla3c8def92011-06-12 20:01:58 +00002697 q = &txo->cq;
2698 if (q->created)
2699 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2700 be_queue_free(adapter, q);
2701 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002702}
2703
/* Create the TX queues and their completion queues. The number of TXQs is
 * bounded by the number of EQs and the device maximum; each TX CQ is bound
 * to an EQ round-robin, and XPS is configured from the EQ's affinity mask.
 * Returns 0 on success or a negative status; partial creations are cleaned
 * up by the caller's destroy path.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer transmits from the CPUs serving this EQ to this TXQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2748
2749static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002750{
2751 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002752 struct be_rx_obj *rxo;
2753 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002754
Sathya Perla3abcded2010-10-03 22:12:27 -07002755 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002756 q = &rxo->cq;
2757 if (q->created)
2758 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2759 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002760 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002761}
2762
/* Decide how many RX queues to use (RSS rings plus an optional default RXQ)
 * and create a completion queue for each, spread round-robin across the
 * EQs. Returns 0 on success or a negative status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Bind this RX CQ to an EQ round-robin */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2804
/* Legacy INTx interrupt handler: count pending EQ events, clear them via
 * the EQ doorbell (without re-arming) and kick NAPI for EQ0, which is the
 * only EQ used in INTx mode.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2836
/* MSI-x interrupt handler: clear the EQ doorbell without re-arming it and
 * defer all event processing to NAPI (be_poll).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2845
Sathya Perla2e588f82011-03-11 02:49:26 +00002846static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002847{
Somnath Koture38b1702013-05-29 22:55:56 +00002848 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002849}
2850
/* Drain up to @budget RX completions from @rxo's CQ, delivering frames via
 * GRO or the regular path, and discarding flush/partial/mis-filtered
 * completions. Notifies the CQ for the consumed entries and replenishes RX
 * buffers when the queue runs low. Returns the number of completions
 * processed. @polling distinguishes NAPI from busy-poll context.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some SKUs
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Stats are updated even for discarded completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2910
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302911static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302912{
2913 switch (status) {
2914 case BE_TX_COMP_HDR_PARSE_ERR:
2915 tx_stats(txo)->tx_hdr_parse_err++;
2916 break;
2917 case BE_TX_COMP_NDMA_ERR:
2918 tx_stats(txo)->tx_dma_err++;
2919 break;
2920 case BE_TX_COMP_ACL_ERR:
2921 tx_stats(txo)->tx_spoof_check_err++;
2922 break;
2923 }
2924}
2925
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302926static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302927{
2928 switch (status) {
2929 case LANCER_TX_COMP_LSO_ERR:
2930 tx_stats(txo)->tx_tso_err++;
2931 break;
2932 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2933 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2934 tx_stats(txo)->tx_spoof_check_err++;
2935 break;
2936 case LANCER_TX_COMP_QINQ_ERR:
2937 tx_stats(txo)->tx_qinq_err++;
2938 break;
2939 case LANCER_TX_COMP_PARITY_ERR:
2940 tx_stats(txo)->tx_internal_parity_err++;
2941 break;
2942 case LANCER_TX_COMP_DMA_ERR:
2943 tx_stats(txo)->tx_dma_err++;
2944 break;
2945 }
2946}
2947
/* Reap all pending TX completions on @txo (queue index @idx): free the
 * completed wrbs, account errors per chip family, notify the CQ, and wake
 * the netdev subqueue if it had been stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* Non-zero status => error counters, chip-specific decode */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002982
/* NAPI vs busy-poll arbitration helpers. Each EQ carries a spinlock-guarded
 * state word so that NAPI processing and socket busy-polling never run on
 * the same EQ's rings concurrently. When CONFIG_NET_RX_BUSY_POLL is off,
 * the stubs make NAPI always win and busy-poll never engage.
 */
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Try to claim the EQ for NAPI; on contention record a yield and fail */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-polling (called with BH enabled) */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-polling */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Reset the arbitration state; called before (re)enabling the EQ */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until any in-flight busy-poll on this EQ finishes */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3082
/* NAPI poll handler for one EQ: reap TX completions, process RX (unless
 * busy-poll holds the EQ), service MCC completions on the MCC EQ, and
 * either complete NAPI and re-arm the EQ or stay in polling mode.
 * Returns the amount of RX work done (budget if the EQ was yielded).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	/* Count (and clear from HW) the events that triggered this poll */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ; claim the whole budget so NAPI
		 * re-polls instead of re-arming the EQ
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3131
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Socket busy-poll handler: process a small batch (up to 4 completions)
 * from the first RX ring on this EQ that has work. Returns the number of
 * packets processed, or LL_FLUSH_BUSY if NAPI currently owns the EQ.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3153
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003154void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00003155{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003156 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3157 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003158 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303159 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00003160
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303161 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perla72f02482011-11-10 19:17:58 +00003162 return;
3163
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003164 if (lancer_chip(adapter)) {
3165 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3166 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303167 be_set_error(adapter, BE_ERROR_UE);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003168 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303169 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003170 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05303171 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303172 /* Do not log error messages if its a FW reset */
3173 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3174 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3175 dev_info(dev, "Firmware update in progress\n");
3176 } else {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303177 dev_err(dev, "Error detected in the card\n");
3178 dev_err(dev, "ERR: sliport status 0x%x\n",
3179 sliport_status);
3180 dev_err(dev, "ERR: sliport error1 0x%x\n",
3181 sliport_err1);
3182 dev_err(dev, "ERR: sliport error2 0x%x\n",
3183 sliport_err2);
3184 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00003185 }
3186 } else {
Suresh Reddy25848c92015-03-20 06:28:25 -04003187 ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3188 ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3189 ue_lo_mask = ioread32(adapter->pcicfg +
3190 PCICFG_UE_STATUS_LOW_MASK);
3191 ue_hi_mask = ioread32(adapter->pcicfg +
3192 PCICFG_UE_STATUS_HI_MASK);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003193
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003194 ue_lo = (ue_lo & ~ue_lo_mask);
3195 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00003196
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303197 /* On certain platforms BE hardware can indicate spurious UEs.
3198 * Allow HW to stop working completely in case of a real UE.
3199 * Hence not setting the hw_error for UE detection.
3200 */
3201
3202 if (ue_lo || ue_hi) {
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303203 dev_err(dev,
3204 "Unrecoverable Error detected in the adapter");
3205 dev_err(dev, "Please reboot server to recover");
3206 if (skyhawk_chip(adapter))
Venkata Duvvuru954f6822015-05-13 13:00:13 +05303207 be_set_error(adapter, BE_ERROR_UE);
3208
Somnath Kotureb0eecc2014-02-12 16:07:54 +05303209 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3210 if (ue_lo & 1)
3211 dev_err(dev, "UE: %s bit set\n",
3212 ue_status_low_desc[i]);
3213 }
3214 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3215 if (ue_hi & 1)
3216 dev_err(dev, "UE: %s bit set\n",
3217 ue_status_hi_desc[i]);
3218 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05303219 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00003220 }
Ajit Khaparde7c185272010-07-29 06:16:33 +00003221}
3222
Sathya Perla8d56ff12009-11-22 22:02:26 +00003223static void be_msix_disable(struct be_adapter *adapter)
3224{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003225 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003226 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003227 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303228 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003229 }
3230}
3231
/* Enable MSI-x, requesting enough vectors for the NIC queues (and, when
 * RoCE is supported, an equal share for RoCE). On failure, returns the
 * pci_enable_msix_range() error for VFs (which cannot fall back to INTx)
 * and 0 for PFs so probe can continue with INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Split granted vectors evenly between NIC and RoCE */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3275
/* Return the Linux IRQ number assigned to @eqo's MSI-x table entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3281
3282static int be_msix_register(struct be_adapter *adapter)
3283{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003284 struct net_device *netdev = adapter->netdev;
3285 struct be_eq_obj *eqo;
3286 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003287
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003288 for_all_evt_queues(adapter, eqo, i) {
3289 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3290 vec = be_msix_vec_get(adapter, eqo);
3291 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07003292 if (status)
3293 goto err_msix;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003294
3295 irq_set_affinity_hint(vec, eqo->affinity_mask);
Sathya Perla3abcded2010-10-03 22:12:27 -07003296 }
Sathya Perlab628bde2009-08-17 00:58:26 +00003297
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003298 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003299err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003300 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
3301 free_irq(be_msix_vec_get(adapter, eqo), eqo);
3302 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05303303 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003304 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003305 return status;
3306}
3307
/* Register the adapter's interrupt handler(s): MSI-x when enabled (one IRQ
 * per EQ), otherwise a shared INTx IRQ bound to EQ0. VFs must use MSI-x.
 * Sets isr_registered on success; returns 0 or a negative errno.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3335
3336static void be_irq_unregister(struct be_adapter *adapter)
3337{
3338 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003339 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003340 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003341
3342 if (!adapter->isr_registered)
3343 return;
3344
3345 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003346 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003347 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003348 goto done;
3349 }
3350
3351 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003352 for_all_evt_queues(adapter, eqo, i) {
3353 vec = be_msix_vec_get(adapter, eqo);
3354 irq_set_affinity_hint(vec, NULL);
3355 free_irq(vec, eqo);
3356 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003357
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003358done:
3359 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003360}
3361
/* Destroy all created RX queues and free their queue memory.
 * On Lancer, first works around a potential HW stall (see inline comment).
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
        struct be_queue_info *q;
        struct be_rx_obj *rxo;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                q = &rxo->q;
                if (q->created) {
                        /* If RXQs are destroyed while in an "out of buffer"
                         * state, there is a possibility of an HW stall on
                         * Lancer. So, post 64 buffers to each queue to relieve
                         * the "out of buffer" condition.
                         * Make sure there's space in the RXQ before posting.
                         */
                        if (lancer_chip(adapter)) {
                                be_rx_cq_clean(rxo);
                                if (atomic_read(&q->used) == 0)
                                        be_post_rx_frags(rxo, GFP_KERNEL,
                                                         MAX_RX_POST);
                        }

                        /* Destroy in FW first, then drain the CQ and RXQ */
                        be_cmd_rxq_destroy(adapter, q);
                        be_rx_cq_clean(rxo);
                        be_rxq_clean(rxo);
                }
                be_queue_free(adapter, q);
        }
}
3391
Kalesh APbcc84142015-08-05 03:27:48 -04003392static void be_disable_if_filters(struct be_adapter *adapter)
3393{
3394 be_cmd_pmac_del(adapter, adapter->if_handle,
3395 adapter->pmac_id[0], 0);
3396
3397 be_clear_uc_list(adapter);
3398
3399 /* The IFACE flags are enabled in the open path and cleared
3400 * in the close path. When a VF gets detached from the host and
3401 * assigned to a VM the following happens:
3402 * - VF's IFACE flags get cleared in the detach path
3403 * - IFACE create is issued by the VF in the attach path
3404 * Due to a bug in the BE3/Skyhawk-R FW
3405 * (Lancer FW doesn't have the bug), the IFACE capability flags
3406 * specified along with the IFACE create cmd issued by a VF are not
3407 * honoured by FW. As a consequence, if a *new* driver
3408 * (that enables/disables IFACE flags in open/close)
3409 * is loaded in the host and an *old* driver is * used by a VM/VF,
3410 * the IFACE gets created *without* the needed flags.
3411 * To avoid this, disable RX-filter flags only for Lancer.
3412 */
3413 if (lancer_chip(adapter)) {
3414 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3415 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3416 }
3417}
3418
/* ndo_stop handler: disable filters, NAPI and MCC processing, drain
 * pending TX completions, destroy the RX queues and release IRQs.
 */
static int be_close(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        int i;

        /* This protection is needed as be_close() may be called even when the
         * adapter is in cleared state (after eeh perm failure)
         */
        if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
                return 0;

        be_disable_if_filters(adapter);

        be_roce_dev_close(adapter);

        if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
                for_all_evt_queues(adapter, eqo, i) {
                        napi_disable(&eqo->napi);
                        be_disable_busy_poll(eqo);
                }
                adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
        }

        be_async_mcc_disable(adapter);

        /* Wait for all pending tx completions to arrive so that
         * all tx skbs are freed.
         */
        netif_tx_disable(netdev);
        be_tx_compl_clean(adapter);

        be_rx_qs_destroy(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                /* Ensure no handler is still running before cleaning the EQ */
                if (msix_enabled(adapter))
                        synchronize_irq(be_msix_vec_get(adapter, eqo));
                else
                        synchronize_irq(netdev->irq);
                be_eq_clean(eqo);
        }

        be_irq_unregister(adapter);

        return 0;
}
3465
/* Allocate and create all RX queues, program the RSS indirection table
 * and hash key, and post initial RX buffers.
 * Returns 0 on success or an error status from queue/FW setup.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
        struct rss_info *rss = &adapter->rss_info;
        u8 rss_key[RSS_HASH_KEY_LEN];
        struct be_rx_obj *rxo;
        int rc, i, j;

        /* Allocate ring memory for every RX queue */
        for_all_rx_queues(adapter, rxo, i) {
                rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
                                    sizeof(struct be_eth_rx_d));
                if (rc)
                        return rc;
        }

        /* Create the default (non-RSS) RXQ only when required */
        if (adapter->need_def_rxq || !adapter->num_rss_qs) {
                rxo = default_rxo(adapter);
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
                                       rx_frag_size, adapter->if_handle,
                                       false, &rxo->rss_id);
                if (rc)
                        return rc;
        }

        for_all_rss_queues(adapter, rxo, i) {
                rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
                                       rx_frag_size, adapter->if_handle,
                                       true, &rxo->rss_id);
                if (rc)
                        return rc;
        }

        if (be_multi_rxq(adapter)) {
                /* Fill the indirection table by cycling over the RSS rings */
                for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
                        for_all_rss_queues(adapter, rxo, i) {
                                if ((j + i) >= RSS_INDIR_TABLE_LEN)
                                        break;
                                rss->rsstable[j + i] = rxo->rss_id;
                                rss->rss_queue[j + i] = i;
                        }
                }
                rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
                                 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

                /* UDP RSS flags are set only for non-BEx chips */
                if (!BEx_chip(adapter))
                        rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
                                          RSS_ENABLE_UDP_IPV6;
        } else {
                /* Disable RSS, if only default RX Q is created */
                rss->rss_flags = RSS_ENABLE_NONE;
        }

        /* Program the kernel-provided RSS hash key */
        netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
        rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
                               RSS_INDIR_TABLE_LEN, rss_key);
        if (rc) {
                rss->rss_flags = RSS_ENABLE_NONE;
                return rc;
        }

        memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

        /* Post 1 less than RXQ-len to avoid head being equal to tail,
         * which is a queue empty condition
         */
        for_all_rx_queues(adapter, rxo, i)
                be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

        return 0;
}
3535
Kalesh APbcc84142015-08-05 03:27:48 -04003536static int be_enable_if_filters(struct be_adapter *adapter)
3537{
3538 int status;
3539
3540 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3541 if (status)
3542 return status;
3543
3544 /* For BE3 VFs, the PF programs the initial MAC address */
3545 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3546 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3547 adapter->if_handle,
3548 &adapter->pmac_id[0], 0);
3549 if (status)
3550 return status;
3551 }
3552
3553 if (adapter->vlans_added)
3554 be_vid_config(adapter);
3555
3556 be_set_rx_mode(adapter->netdev);
3557
3558 return 0;
3559}
3560
/* ndo_open handler: create RX queues, program filters, register IRQs,
 * arm CQs/EQs, enable NAPI and start the TX queues.  Any failure undoes
 * the partially opened state via be_close().
 */
static int be_open(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_eq_obj *eqo;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u8 link_status;
        int status, i;

        status = be_rx_qs_create(adapter);
        if (status)
                goto err;

        status = be_enable_if_filters(adapter);
        if (status)
                goto err;

        status = be_irq_register(adapter);
        if (status)
                goto err;

        /* Arm all RX and TX completion queues */
        for_all_rx_queues(adapter, rxo, i)
                be_cq_notify(adapter, rxo->cq.id, true, 0);

        for_all_tx_queues(adapter, txo, i)
                be_cq_notify(adapter, txo->cq.id, true, 0);

        be_async_mcc_enable(adapter);

        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
                be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;

        /* Report the current link state, if the query succeeds */
        status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
        if (!status)
                be_link_status_update(adapter, link_status);

        netif_tx_start_all_queues(netdev);
        be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
        if (skyhawk_chip(adapter))
                vxlan_get_rx_port(netdev);
#endif

        return 0;
err:
        be_close(adapter->netdev);
        return -EIO;
}
3614
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003615static int be_setup_wol(struct be_adapter *adapter, bool enable)
3616{
Kalesh Purayil145155e2015-07-10 05:32:43 -04003617 struct device *dev = &adapter->pdev->dev;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003618 struct be_dma_mem cmd;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003619 u8 mac[ETH_ALEN];
Kalesh Purayil145155e2015-07-10 05:32:43 -04003620 int status;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003621
Joe Perchesc7bf7162015-03-02 19:54:47 -08003622 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003623
3624 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Kalesh Purayil145155e2015-07-10 05:32:43 -04003625 cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303626 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303627 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003628
3629 if (enable) {
3630 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303631 PCICFG_PM_CONTROL_OFFSET,
3632 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003633 if (status) {
Kalesh Purayil145155e2015-07-10 05:32:43 -04003634 dev_err(dev, "Could not enable Wake-on-lan\n");
3635 goto err;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003636 }
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003637 } else {
Kalesh Purayil145155e2015-07-10 05:32:43 -04003638 ether_addr_copy(mac, adapter->netdev->dev_addr);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003639 }
3640
Kalesh Purayil145155e2015-07-10 05:32:43 -04003641 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3642 pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
3643 pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
3644err:
3645 dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003646 return status;
3647}
3648
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003649static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3650{
3651 u32 addr;
3652
3653 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3654
3655 mac[5] = (u8)(addr & 0xFF);
3656 mac[4] = (u8)((addr >> 8) & 0xFF);
3657 mac[3] = (u8)((addr >> 16) & 0xFF);
3658 /* Use the OUI from the current MAC address */
3659 memcpy(mac, adapter->netdev->dev_addr, 3);
3660}
3661
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003662/*
3663 * Generate a seed MAC address from the PF MAC Address using jhash.
3664 * MAC Address for VFs are assigned incrementally starting from the seed.
3665 * These addresses are programmed in the ASIC by the PF and the VF driver
3666 * queries for the MAC address during its probe.
3667 */
Sathya Perla4c876612013-02-03 20:30:11 +00003668static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003669{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003670 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003671 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003672 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003673 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003674
3675 be_vf_eth_addr_generate(adapter, mac);
3676
Sathya Perla11ac75e2011-12-13 00:58:50 +00003677 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303678 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003679 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003680 vf_cfg->if_handle,
3681 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303682 else
3683 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3684 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003685
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003686 if (status)
3687 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303688 "Mac address assignment failed for VF %d\n",
3689 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003690 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003691 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003692
3693 mac[5] += 1;
3694 }
3695 return status;
3696}
3697
Sathya Perla4c876612013-02-03 20:30:11 +00003698static int be_vfs_mac_query(struct be_adapter *adapter)
3699{
3700 int status, vf;
3701 u8 mac[ETH_ALEN];
3702 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003703
3704 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303705 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3706 mac, vf_cfg->if_handle,
3707 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003708 if (status)
3709 return status;
3710 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3711 }
3712 return 0;
3713}
3714
/* Undo SR-IOV setup: disable SR-IOV (unless VFs are still assigned to
 * VMs), remove each VF's MAC and IFACE, and free the per-VF state.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
        struct be_vf_cfg *vf_cfg;
        u32 vf;

        /* Don't disable SR-IOV or destroy IFACEs while VFs are in use by
         * guests; only the driver-side per-VF state is freed in that case.
         */
        if (pci_vfs_assigned(adapter->pdev)) {
                dev_warn(&adapter->pdev->dev,
                         "VFs are assigned to VMs: not disabling VFs\n");
                goto done;
        }

        pci_disable_sriov(adapter->pdev);

        for_all_vfs(adapter, vf_cfg, vf) {
                /* BEx removes the pmac entry; later chips clear the MAC */
                if (BEx_chip(adapter))
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        vf_cfg->pmac_id, vf + 1);
                else
                        be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
                                       vf + 1);

                be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
        }
done:
        kfree(adapter->vf_cfg);
        adapter->num_vfs = 0;
        adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3743
/* Tear down MCC, RX-CQ, TX and event queues; EQs are destroyed last */
static void be_clear_queues(struct be_adapter *adapter)
{
        be_mcc_queues_destroy(adapter);
        be_rx_cqs_destroy(adapter);
        be_tx_queues_destroy(adapter);
        be_evt_queues_destroy(adapter);
}
3751
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303752static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003753{
Sathya Perla191eb752012-02-23 18:50:13 +00003754 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3755 cancel_delayed_work_sync(&adapter->work);
3756 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3757 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303758}
3759
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003760static void be_cancel_err_detection(struct be_adapter *adapter)
3761{
3762 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3763 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3764 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3765 }
3766}
3767
#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN offload state: convert the tunnel IFACE back to normal
 * mode, clear the FW's VxLAN port and strip the advertised tunnel
 * offload features from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
                be_cmd_manage_iface(adapter, adapter->if_handle,
                                    OP_CONVERT_TUNNEL_TO_NORMAL);

        if (adapter->vxlan_port)
                be_cmd_set_vxlan_port(adapter, 0);

        adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
        adapter->vxlan_port = 0;

        netdev->hw_enc_features = 0;
        netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
        netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303788
Vasundhara Volamf2858732015-03-04 00:44:33 -05003789static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3790{
3791 struct be_resources res = adapter->pool_res;
3792 u16 num_vf_qs = 1;
3793
3794 /* Distribute the queue resources equally among the PF and it's VFs
3795 * Do not distribute queue resources in multi-channel configuration.
3796 */
3797 if (num_vfs && !be_is_mc(adapter)) {
3798 /* If number of VFs requested is 8 less than max supported,
3799 * assign 8 queue pairs to the PF and divide the remaining
3800 * resources evenly among the VFs
3801 */
3802 if (num_vfs < (be_max_vfs(adapter) - 8))
3803 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3804 else
3805 num_vf_qs = res.max_rss_qs / num_vfs;
3806
3807 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3808 * interfaces per port. Provide RSS on VFs, only if number
3809 * of VFs requested is less than MAX_RSS_IFACES limit.
3810 */
3811 if (num_vfs >= MAX_RSS_IFACES)
3812 num_vf_qs = 1;
3813 }
3814 return num_vf_qs;
3815}
3816
/* Undo be_setup(): stop the worker, clear VFs, redistribute FW resources,
 * disable VxLAN offloads, destroy the IFACE and all queues, and disable
 * MSI-x.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u16 num_vf_qs;

        be_cancel_worker(adapter);

        if (sriov_enabled(adapter))
                be_vf_clear(adapter);

        /* Re-configure FW to distribute resources evenly across max-supported
         * number of VFs, only when VFs are not already enabled.
         */
        if (skyhawk_chip(adapter) && be_physfn(adapter) &&
            !pci_vfs_assigned(pdev)) {
                num_vf_qs = be_calculate_vf_qs(adapter,
                                               pci_sriov_get_totalvfs(pdev));
                be_cmd_set_sriov_config(adapter, adapter->pool_res,
                                        pci_sriov_get_totalvfs(pdev),
                                        num_vf_qs);
        }

#ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
#endif
        kfree(adapter->pmac_id);
        adapter->pmac_id = NULL;

        be_cmd_if_destroy(adapter, adapter->if_handle, 0);

        be_clear_queues(adapter);

        be_msix_disable(adapter);
        adapter->flags &= ~BE_FLAGS_SETUP_DONE;
        return 0;
}
3853
/* Create an IFACE for each VF.  Capability flags come from the FW
 * profile when available (non-BE3), otherwise a fixed default set.
 * Returns 0 on success or the first IFACE-create error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
        struct be_resources res = {0};
        u32 cap_flags, en_flags, vf;
        struct be_vf_cfg *vf_cfg;
        int status;

        /* If a FW profile exists, then cap_flags are updated */
        cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

        for_all_vfs(adapter, vf_cfg, vf) {
                if (!BE3_chip(adapter)) {
                        status = be_cmd_get_profile_config(adapter, &res,
                                                           RESOURCE_LIMITS,
                                                           vf + 1);
                        if (!status) {
                                cap_flags = res.if_cap_flags;
                                /* Prevent VFs from enabling VLAN promiscuous
                                 * mode
                                 */
                                cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
                        }
                }

                /* Enable only the basic subset of the capability flags */
                en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
                                        BE_IF_FLAGS_BROADCAST |
                                        BE_IF_FLAGS_MULTICAST |
                                        BE_IF_FLAGS_PASS_L3L4_ERRORS);
                status = be_cmd_if_create(adapter, cap_flags, en_flags,
                                          &vf_cfg->if_handle, vf + 1);
                if (status)
                        return status;
        }

        return 0;
}
3891
Sathya Perla39f1d942012-05-08 19:41:24 +00003892static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003893{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003894 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003895 int vf;
3896
Sathya Perla39f1d942012-05-08 19:41:24 +00003897 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3898 GFP_KERNEL);
3899 if (!adapter->vf_cfg)
3900 return -ENOMEM;
3901
Sathya Perla11ac75e2011-12-13 00:58:50 +00003902 for_all_vfs(adapter, vf_cfg, vf) {
3903 vf_cfg->if_handle = -1;
3904 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003905 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003906 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003907}
3908
/* Configure SR-IOV VFs: create (or, on re-load, query) per-VF IFACEs and
 * MACs, grant filtering privileges, read spoof-check state, set QoS and
 * link-state defaults, and enable SR-IOV on the PCI device when it was
 * not already enabled.  On failure, tears everything down via
 * be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        struct be_vf_cfg *vf_cfg;
        int status, old_vfs, vf;
        bool spoofchk;

        old_vfs = pci_num_vf(adapter->pdev);

        status = be_vf_setup_init(adapter);
        if (status)
                goto err;

        if (old_vfs) {
                /* VFs already exist (e.g. PF driver reload): query their
                 * IFACE handles and MACs instead of re-creating them.
                 */
                for_all_vfs(adapter, vf_cfg, vf) {
                        status = be_cmd_get_if_id(adapter, vf_cfg, vf);
                        if (status)
                                goto err;
                }

                status = be_vfs_mac_query(adapter);
                if (status)
                        goto err;
        } else {
                status = be_vfs_if_create(adapter);
                if (status)
                        goto err;

                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto err;
        }

        for_all_vfs(adapter, vf_cfg, vf) {
                /* Allow VFs to program MAC/VLAN filters */
                status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
                                                  vf + 1);
                if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
                        status = be_cmd_set_fn_privileges(adapter,
                                                          vf_cfg->privileges |
                                                          BE_PRIV_FILTMGMT,
                                                          vf + 1);
                        if (!status) {
                                vf_cfg->privileges |= BE_PRIV_FILTMGMT;
                                dev_info(dev, "VF%d has FILTMGMT privilege\n",
                                         vf);
                        }
                }

                /* Allow full available bandwidth */
                if (!old_vfs)
                        be_cmd_config_qos(adapter, 0, 0, vf + 1);

                status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
                                               vf_cfg->if_handle, NULL,
                                               &spoofchk);
                if (!status)
                        vf_cfg->spoofchk = spoofchk;

                if (!old_vfs) {
                        be_cmd_enable_vf(adapter, vf + 1);
                        be_cmd_set_logical_link_config(adapter,
                                                       IFLA_VF_LINK_STATE_AUTO,
                                                       vf + 1);
                }
        }

        if (!old_vfs) {
                status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
                if (status) {
                        dev_err(dev, "SRIOV enable failed\n");
                        adapter->num_vfs = 0;
                        goto err;
                }
        }

        adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
        return 0;
err:
        dev_err(dev, "VF setup failed\n");
        be_vf_clear(adapter);
        return status;
}
3992
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303993/* Converting function_mode bits on BE3 to SH mc_type enums */
3994
3995static u8 be_convert_mc_type(u32 function_mode)
3996{
Suresh Reddy66064db2014-06-23 16:41:29 +05303997 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303998 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303999 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304000 return FLEX10;
4001 else if (function_mode & VNIC_MODE)
4002 return vNIC2;
4003 else if (function_mode & UMC_ENABLED)
4004 return UMC;
4005 else
4006 return MC_NONE;
4007}
4008
Sathya Perla92bf14a2013-08-27 16:57:32 +05304009/* On BE2/BE3 FW does not suggest the supported limits */
4010static void BEx_get_resources(struct be_adapter *adapter,
4011 struct be_resources *res)
4012{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304013 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304014
4015 if (be_physfn(adapter))
4016 res->max_uc_mac = BE_UC_PMAC_COUNT;
4017 else
4018 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4019
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304020 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4021
4022 if (be_is_mc(adapter)) {
4023 /* Assuming that there are 4 channels per port,
4024 * when multi-channel is enabled
4025 */
4026 if (be_is_qnq_mode(adapter))
4027 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4028 else
4029 /* In a non-qnq multichannel mode, the pvid
4030 * takes up one vlan entry
4031 */
4032 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4033 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304034 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304035 }
4036
Sathya Perla92bf14a2013-08-27 16:57:32 +05304037 res->max_mcast_mac = BE_MAX_MC;
4038
Vasundhara Volama5243da2014-03-11 18:53:07 +05304039 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4040 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4041 * *only* if it is RSS-capable.
4042 */
4043 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
Kalesh AP18c57c72015-05-06 05:30:38 -04004044 be_virtfn(adapter) ||
4045 (be_is_mc(adapter) &&
4046 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304047 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05304048 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4049 struct be_resources super_nic_res = {0};
4050
4051 /* On a SuperNIC profile, the driver needs to use the
4052 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4053 */
Vasundhara Volamf2858732015-03-04 00:44:33 -05004054 be_cmd_get_profile_config(adapter, &super_nic_res,
4055 RESOURCE_LIMITS, 0);
Suresh Reddya28277d2014-09-02 09:56:57 +05304056 /* Some old versions of BE3 FW don't report max_tx_qs value */
4057 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4058 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05304059 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05304060 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05304061
4062 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4063 !use_sriov && be_physfn(adapter))
4064 res->max_rss_qs = (adapter->be3_native) ?
4065 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4066 res->max_rx_qs = res->max_rss_qs + 1;
4067
Suresh Reddye3dc8672014-01-06 13:02:25 +05304068 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05304069 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05304070 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4071 else
4072 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304073
4074 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05004075 res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
Sathya Perla92bf14a2013-08-27 16:57:32 +05304076 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4077 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4078}
4079
Sathya Perla30128032011-11-10 19:17:57 +00004080static void be_setup_init(struct be_adapter *adapter)
4081{
4082 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004083 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004084 adapter->if_handle = -1;
4085 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004086 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004087 if (be_physfn(adapter))
4088 adapter->cmd_privileges = MAX_PRIVILEGES;
4089 else
4090 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004091}
4092
/* Queries the PF-pool SRIOV limits from FW into adapter->pool_res and picks
 * up any VFs left enabled by a previous driver load. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
4124
/* Reads the SRIOV configuration and, on Skyhawk, asks FW to (re)distribute
 * the PF-pool resources for SRIOV. A failure here is logged but not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4150
/* Populates adapter->res with per-function queue/MAC/VLAN limits:
 * derived locally for BE2/BE3, queried from FW for Lancer/Skyhawk.
 * Also sanitizes cfg_num_qs against HW and platform limits.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4201
/* Queries FW configuration (fw cfg, WoL capability, port name, active
 * profile), reads resource limits via be_get_resources(), and allocates
 * the pmac_id table sized to the unicast-MAC limit.
 * Returns 0 on success or a negative errno / FW status on failure.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* On BEx the driver's msg level mirrors the FW log level */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4239
Sathya Perla95046b92013-07-23 15:25:02 +05304240static int be_mac_setup(struct be_adapter *adapter)
4241{
4242 u8 mac[ETH_ALEN];
4243 int status;
4244
4245 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4246 status = be_cmd_get_perm_mac(adapter, mac);
4247 if (status)
4248 return status;
4249
4250 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4251 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304252 }
4253
Sathya Perla95046b92013-07-23 15:25:02 +05304254 return 0;
4255}
4256
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304257static void be_schedule_worker(struct be_adapter *adapter)
4258{
4259 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4260 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4261}
4262
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004263static void be_schedule_err_detection(struct be_adapter *adapter)
4264{
4265 schedule_delayed_work(&adapter->be_err_detection_work,
4266 msecs_to_jiffies(1000));
4267 adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4268}
4269
/* Creates all HW queues (EQs first, then TX, RX-CQ and MCC queues) and
 * publishes the actual RX/TX queue counts to the network stack.
 * NOTE(review): the netif_set_real_num_* calls appear to require
 * rtnl_lock to be held by the caller (be_setup takes it) - confirm.
 * On any failure, returns the error; caller is expected to clean up.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4304
/* Tears down and recreates all queues to apply a changed queue
 * configuration, briefly closing the interface if it was running.
 * MSI-X is re-programmed only when no vectors are shared with RoCE.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* Re-open only if the interface was up when we started */
	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4340
/* Parses the leading major number out of a "major.minor..." FW version
 * string. Returns 0 when the string does not start with an integer.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4351
Sathya Perlaf962f842015-02-23 04:20:16 -05004352/* If any VFs are already enabled don't FLR the PF */
4353static bool be_reset_required(struct be_adapter *adapter)
4354{
4355 return pci_num_vf(adapter->pdev) ? false : true;
4356}
4357
/* Wait for the FW to be ready and perform the required initialization */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip the FLR when VFs are already enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4389
/* Top-level function bring-up: waits for FW readiness (with an optional
 * FLR), queries configuration and resource limits, enables MSI-X, creates
 * the interface and its queues, programs the MAC and flow control, sets up
 * VFs if requested, and starts the periodic worker.
 * Failures after be_get_config() are unwound via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* Need to invoke this cmd first to get the PCI Function Number */
	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn about pre-4.0 firmware on BE2; IRQ behavior may be broken */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If setting flow control fails, read back what FW actually uses */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4478
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: ring every event queue's doorbell and schedule its NAPI
 * context so pending completions get processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eq;
	int idx;

	for_all_evt_queues(adapter, eq, idx) {
		be_eq_notify(eq->adapter, eq->q.id, false, true, 0, 0);
		napi_schedule(&eq->napi);
	}
}
#endif
4492
/* Loads the named firmware image from userspace and flashes it via the
 * chip-specific download path. Only allowed while the interface is up.
 * Returns 0 on success, -ENETDOWN if the interface is down, or the
 * request/download error.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	/* NOTE(review): on failure this jumps to fw_exit and calls
	 * release_firmware(fw); this relies on request_firmware() setting
	 * fw to NULL on error - confirm against the firmware API.
	 */
	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	/* Refresh the cached FW version string after a successful flash */
	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	release_firmware(fw);
	return status;
}
4522
/* ndo_bridge_setlink: programs the HW switch forwarding mode (VEB/VEPA)
 * from the first IFLA_BRIDGE_MODE attribute in the netlink message.
 * Requires SRIOV to be enabled; VEPA is rejected on BE3.
 * NOTE(review): if no IFLA_BRIDGE_MODE attribute is present, the loop
 * falls through to the err label and logs a failure while returning 0 -
 * confirm this is intended.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4572
/* ndo_bridge_getlink: reports the current HW switch mode (VEB/VEPA) via
 * the default bridge-getlink helper. Returns 0 without filling the skb
 * when the mode cannot be queried or the port is in pass-through mode.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
4600
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304601#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004602/* VxLAN offload Notes:
4603 *
4604 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4605 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4606 * is expected to work across all types of IP tunnels once exported. Skyhawk
4607 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05304608 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
4609 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
4610 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004611 *
4612 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4613 * adds more than one port, disable offloads and don't re-enable them again
4614 * until after all the tunnels are removed.
4615 */
/* Enables VxLAN offloads for the given UDP port (Skyhawk only - see the
 * VxLAN offload notes above this #ifdef block). Offloads are supported
 * for a single UDP port; adding a second distinct port disables them
 * until all ports are removed. Repeated adds of the same port are
 * tracked as aliases.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Offloads not supported on Lancer, BEx or multi-channel configs */
	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* Same port added again: just count it as an alias */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		return;
	}

	/* A different port while offloads are active: disable offloads */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that a VxLAN port is set */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4669
/* Removes a VxLAN UDP port: drops an alias reference first if any,
 * otherwise disables the HW offloads. The port count is decremented for
 * any port (matching or not) so it mirrors be_add_vxlan_port()'s count.
 */
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
Joe Stringer725d5482014-11-13 16:38:13 -08004694
/* ndo_features_check: per-skb feature mask adjustment. Strips checksum
 * and GSO offloads from encapsulated packets that are not well-formed
 * VxLAN frames, since tunnel offloads are configured for VxLAN only.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* Keep offloads only for UDP-encapsulated Ethernet (TEB) with the
	 * exact VxLAN header layout; anything else loses csum/GSO features.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304735#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304736
/* ndo_get_phys_port_id: builds a unique physical-port id from the HBA
 * port number (byte 0, 1-based) followed by the controller serial number
 * words copied in reverse word order. Returns -ENOSPC if the id would
 * exceed MAX_PHYS_ITEM_ID_LEN.
 */
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}
4757
/* net_device_ops for be2net interfaces; netpoll, busy-poll and VxLAN
 * entries are compiled in only when the corresponding config is enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
4790
/* Initialize netdev feature flags and hook up the netdev/ethtool ops */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX stripping/filtering are always-on (not in hw_features) */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4817
/* Quiesce the interface and tear down adapter resources prior to a
 * suspend, reset or error recovery; undone by be_resume().
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* rtnl_lock serializes against concurrent ndo_open/ndo_stop */
	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
4830
/* Rebuild adapter resources and re-attach the netdev; counterpart of
 * be_cleanup(). Returns 0 on success or a negative errno.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	/* re-open only if the interface was up before the cleanup */
	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}
4850
/* Attempt to bring the adapter back up after an error and log the
 * outcome. Returns 0 on success or the be_resume() error code.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_resume(adapter);
	if (status)
		goto err;

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	/* VF recovery is re-attempted by the caller, hence the softer msg */
	if (be_physfn(adapter))
		dev_err(dev, "Adapter recovery failed\n");
	else
		dev_err(dev, "Re-trying adapter recovery\n");

	return status;
}
4870
/* Delayed-work handler that polls for adapter errors and, on a HW
 * error, tears the function down and (Lancer only) tries to recover it.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs; a failed PF recovery stops here */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
4892
/* Log details of an unqualified SFP+ module and ack the event flag */
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Unqualified SFP+ detected on %c from %s part no: %s",
			adapter->port_name, adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	/* clear the flag even if the query failed, so we don't log forever */
	adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
}
4906
/* Periodic (1s) housekeeping: reap MCC completions while the interface
 * is down; otherwise kick off stats collection, read die temperature,
 * replenish starved RX queues, update EQ delays and log SFP events.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new stats query only after the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* die-temp query is PF-only and rate-limited by be_get_temp_freq */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4955
Sathya Perla78fad34e2015-02-23 04:20:08 -05004956static void be_unmap_pci_bars(struct be_adapter *adapter)
4957{
4958 if (adapter->csr)
4959 pci_iounmap(adapter->pdev, adapter->csr);
4960 if (adapter->db)
4961 pci_iounmap(adapter->pdev, adapter->db);
4962}
4963
/* BAR index of the doorbell region: BAR 0 on Lancer chips and on VFs,
 * BAR 4 everywhere else.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
4971
/* Record the RoCE doorbell region (Skyhawk only); the RoCE driver maps
 * it itself. Always returns 0.
 */
static int be_roce_map_pci_bars(struct be_adapter *adapter)
{
	if (skyhawk_chip(adapter)) {
		adapter->roce_db.size = 4096;
		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
							      db_bar(adapter));
		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
							       db_bar(adapter));
	}
	return 0;
}
4983
/* ioremap the CSR, doorbell and PCICFG BARs as applicable for this chip
 * family and function type. Returns 0 or -ENOMEM.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* identify SLI family and whether this function is a VF */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR exists only on BE2/BE3 PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs access PCICFG at an offset in the DB BAR */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5026
5027static void be_drv_cleanup(struct be_adapter *adapter)
5028{
5029 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5030 struct device *dev = &adapter->pdev->dev;
5031
5032 if (mem->va)
5033 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5034
5035 mem = &adapter->rx_filter;
5036 if (mem->va)
5037 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5038
5039 mem = &adapter->stats_cmd;
5040 if (mem->va)
5041 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5042}
5043
/* Allocate and initialize various fields in be_adapter struct.
 * Allocates the mailbox, rx-filter and stats DMA buffers and sets up
 * locks/work items. Returns 0 or -ENOMEM (with partial allocations
 * unwound).
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is the aligned view into the mbox_mem_alloced buffer */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* stats request size differs per chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5114
/* PCI remove callback: unwind everything done in be_probe() */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5144
Arnd Bergmann9a032592015-05-18 23:06:45 +02005145static ssize_t be_hwmon_show_temp(struct device *dev,
5146 struct device_attribute *dev_attr,
5147 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305148{
5149 struct be_adapter *adapter = dev_get_drvdata(dev);
5150
5151 /* Unit: millidegree Celsius */
5152 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5153 return -EIO;
5154 else
5155 return sprintf(buf, "%u\n",
5156 adapter->hwmon_info.be_on_die_temp * 1000);
5157}
5158
/* Expose the cached die temperature as hwmon attribute temp1_input */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

/* generates be_hwmon_groups, passed to the hwmon registration in probe */
ATTRIBUTE_GROUPS(be_hwmon);
5168
Sathya Perlad3791422012-09-28 04:39:44 +00005169static char *mc_name(struct be_adapter *adapter)
5170{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305171 char *str = ""; /* default */
5172
5173 switch (adapter->mc_type) {
5174 case UMC:
5175 str = "UMC";
5176 break;
5177 case FLEX10:
5178 str = "FLEX10";
5179 break;
5180 case vNIC1:
5181 str = "vNIC-1";
5182 break;
5183 case nPAR:
5184 str = "nPAR";
5185 break;
5186 case UFP:
5187 str = "UFP";
5188 break;
5189 case vNIC2:
5190 str = "vNIC-2";
5191 break;
5192 default:
5193 str = "";
5194 }
5195
5196 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005197}
5198
/* "PF" for a physical function, "VF" for a virtual one */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
5203
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005204static inline char *nic_name(struct pci_dev *pdev)
5205{
5206 switch (pdev->device) {
5207 case OC_DEVICE_ID1:
5208 return OC_NAME;
5209 case OC_DEVICE_ID2:
5210 return OC_NAME_BE;
5211 case OC_DEVICE_ID3:
5212 case OC_DEVICE_ID4:
5213 return OC_NAME_LANCER;
5214 case BE_DEVICE_ID2:
5215 return BE3_NAME;
5216 case OC_DEVICE_ID5:
5217 case OC_DEVICE_ID6:
5218 return OC_NAME_SH;
5219 default:
5220 return BE_NAME;
5221 }
5222}
5223
/* PCI probe callback: enable the device, map BARs, bring up the FW
 * command channel and adapter state, then register the net device.
 * Returns 0 on success or a negative errno with everything unwound.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer 64-bit DMA; fall back to a 32-bit mask */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if this fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5319
/* PCI suspend callback: quiesce the adapter and drop to the requested
 * power state; undone by be_pci_resume().
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* arm wake-on-LAN in FW before powering down */
	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5337
/* PCI resume callback: re-enable the device and rebuild adapter state */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	/* disarm wake-on-LAN now that we are running again */
	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5360
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	/* function-level reset stops all DMA from the device */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5381
/* AER/EEH callback: an uncorrectable PCI error was detected on the slot */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* tear down only once even if the callback fires repeatedly */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5413
/* AER/EEH callback: slot has been reset; re-enable the device and wait
 * for the firmware to become ready again.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5439
/* AER/EEH callback: traffic may flow again; rebuild the adapter */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5458
/* sysfs sriov_numvfs handler: enable num_vfs VFs (or disable all when
 * num_vfs is 0). Returns the number of VFs enabled, 0, or -errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	/* VFs assigned to guests cannot be torn down */
	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
5512
/* AER/EEH recovery hooks registered with the PCI core via be_driver */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5518
/* PCI driver descriptor tying together all probe/PM/SR-IOV/EEH hooks */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_pci_resume,
	.shutdown = be_shutdown,
	.sriov_configure = be_pci_sriov_configure,
	.err_handler = &be_eeh_handlers
};
5530
5531static int __init be_init_module(void)
5532{
Joe Perches8e95a202009-12-03 07:58:21 +00005533 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5534 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005535 printk(KERN_WARNING DRV_NAME
5536 " : Module param rx_frag_size must be 2048/4096/8192."
5537 " Using 2048\n");
5538 rx_frag_size = 2048;
5539 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005540
Vasundhara Volamace40af2015-03-04 00:44:34 -05005541 if (num_vfs > 0) {
5542 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5543 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5544 }
5545
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005546 return pci_register_driver(&be_driver);
5547}
5548module_init(be_init_module);
5549
/* Module exit: unregister the PCI driver (be_remove runs per device) */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);