blob: 0bd64f1f9778093298b93e472096723cc92b7d3d [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
/* Module metadata and load-time parameters. */
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment; read-only (S_IRUGO) after load. */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
/* PCI device IDs this driver binds to (BE2/BE3 and OneConnect variants).
 * Terminated by the zero entry, as required by the PCI core.
 */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for each bit of the Unrecoverable Error status
 * low register, indexed by bit position (used when logging UE events).
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE Status High CSR */
/* Human-readable names for each bit of the Unrecoverable Error status
 * high register, indexed by bit position; "Unknown" is the catch-all.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
/* Enable or disable host interrupts by flipping the HOSTINTR bit in the
 * membar control register, accessed through PCI config space.
 * Does nothing if the bit is already in the requested state, avoiding a
 * redundant config-space write.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
/* Enable or disable host interrupts, preferring the FW command and
 * falling back to the config-space register when the command fails.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	int status = 0;

	/* On lancer interrupts can't be controlled via this register */
	if (lancer_chip(adapter))
		return;

	/* skip when the device is lost to an EEH (PCI) error */
	if (be_check_error(adapter, BE_ERROR_EEH))
		return;

	status = be_cmd_intr_set(adapter, enable);
	if (status)
		be_reg_intr_set(adapter, enable);
}
189
/* Ring the RX queue doorbell to tell HW that @posted new buffers are
 * available on queue @qid. No-op once a HW error has been detected.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* ensure descriptor writes are visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
203
/* Ring the TX queue doorbell to tell HW that @posted new WRBs have been
 * placed on @txo's queue. No-op once a HW error has been detected.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* ensure WRB writes are visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
218
/* Notify HW of @num_popped consumed event-queue entries on EQ @qid,
 * optionally re-arming the EQ (@arm), clearing the interrupt
 * (@clear_int) and programming an interrupt-delay multiplier
 * (@eq_delay_mult_enc). No-op once a HW error has been detected.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* always flag this as an event-queue doorbell */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
240
/* Notify HW of @num_popped consumed completion-queue entries on CQ @qid,
 * optionally re-arming the CQ (@arm). Exported for use by other driver
 * files (not static). No-op once a HW error has been detected.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
257
/* ndo_set_mac_address handler: program a new MAC address into the NIC.
 * When the interface is down, only netdev->dev_addr is updated; the HW
 * is programmed later at open time. When up, the new MAC is added via a
 * FW command, the old one deleted, and success is confirmed by querying
 * the FW for the active MAC (the add/del commands may legitimately fail
 * for VFs, depending on PF provisioning and FILTMGMT privilege).
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
322
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323/* BE2 supports only v0 cmd */
324static void *hw_stats_from_cmd(struct be_adapter *adapter)
325{
326 if (BE2_chip(adapter)) {
327 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000331 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else {
335 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
336
337 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000338 }
339}
340
341/* BE2 supports only v0 cmd */
342static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
343{
344 if (BE2_chip(adapter)) {
345 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000349 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500352 } else {
353 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
354
355 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000356 }
357}
358
359static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000361 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
362 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
363 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000364 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000365 &rxf_stats->port[adapter->port_num];
366 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000367
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_pause_frames = port_stats->rx_pause_frames;
370 drvs->rx_crc_errors = port_stats->rx_crc_errors;
371 drvs->rx_control_frames = port_stats->rx_control_frames;
372 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
373 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
374 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
375 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
376 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
377 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
378 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
379 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
380 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
381 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
382 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000383 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 drvs->rx_dropped_header_too_small =
385 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000386 drvs->rx_address_filtered =
387 port_stats->rx_address_filtered +
388 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_alignment_symbol_errors =
390 port_stats->rx_alignment_symbol_errors;
391
392 drvs->tx_pauseframes = port_stats->tx_pauseframes;
393 drvs->tx_controlframes = port_stats->tx_controlframes;
394
395 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000400 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 drvs->forwarded_packets = rxf_stats->forwarded_packets;
402 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
404 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
406}
407
Sathya Perlaca34fe32012-11-06 17:48:56 +0000408static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
411 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
412 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000413 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000414 &rxf_stats->port[adapter->port_num];
415 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416
Sathya Perlaac124ff2011-07-25 19:10:14 +0000417 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000418 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
419 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 drvs->rx_pause_frames = port_stats->rx_pause_frames;
421 drvs->rx_crc_errors = port_stats->rx_crc_errors;
422 drvs->rx_control_frames = port_stats->rx_control_frames;
423 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
424 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
425 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
426 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
427 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
428 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
429 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
430 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
431 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
432 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
433 drvs->rx_dropped_header_too_small =
434 port_stats->rx_dropped_header_too_small;
435 drvs->rx_input_fifo_overflow_drop =
436 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000437 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000438 drvs->rx_alignment_symbol_errors =
439 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000440 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000441 drvs->tx_pauseframes = port_stats->tx_pauseframes;
442 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000443 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 drvs->jabber_events = port_stats->jabber_events;
445 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000447 drvs->forwarded_packets = rxf_stats->forwarded_packets;
448 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000449 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
450 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000451 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
452}
453
Ajit Khaparde61000862013-10-03 16:16:33 -0500454static void populate_be_v2_stats(struct be_adapter *adapter)
455{
456 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
457 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
458 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
459 struct be_port_rxf_stats_v2 *port_stats =
460 &rxf_stats->port[adapter->port_num];
461 struct be_drv_stats *drvs = &adapter->drv_stats;
462
463 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
464 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
465 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
466 drvs->rx_pause_frames = port_stats->rx_pause_frames;
467 drvs->rx_crc_errors = port_stats->rx_crc_errors;
468 drvs->rx_control_frames = port_stats->rx_control_frames;
469 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
470 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
471 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
472 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
473 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
474 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
475 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
476 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
477 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
478 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
479 drvs->rx_dropped_header_too_small =
480 port_stats->rx_dropped_header_too_small;
481 drvs->rx_input_fifo_overflow_drop =
482 port_stats->rx_input_fifo_overflow_drop;
483 drvs->rx_address_filtered = port_stats->rx_address_filtered;
484 drvs->rx_alignment_symbol_errors =
485 port_stats->rx_alignment_symbol_errors;
486 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
487 drvs->tx_pauseframes = port_stats->tx_pauseframes;
488 drvs->tx_controlframes = port_stats->tx_controlframes;
489 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
490 drvs->jabber_events = port_stats->jabber_events;
491 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
492 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
493 drvs->forwarded_packets = rxf_stats->forwarded_packets;
494 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
495 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
496 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
497 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530498 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500499 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
500 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
501 drvs->rx_roce_frames = port_stats->roce_frames_received;
502 drvs->roce_drops_crc = port_stats->roce_drops_crc;
503 drvs->roce_drops_payload_len =
504 port_stats->roce_drops_payload_len;
505 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500506}
507
/* Copy Lancer per-port (pport) stats out of the FW response into the
 * driver's stats structure, after converting from LE to CPU order.
 * Lancer reports 64-bit counters; only the low 32 bits (_lo) are kept
 * for counters that the driver tracks as 32-bit values.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address and VLAN filtering separately; fold them */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000544
Sathya Perla09c1c682011-08-22 19:41:53 +0000545static void accumulate_16bit_val(u32 *acc, u16 val)
546{
547#define lo(x) (x & 0xFFFF)
548#define hi(x) (x & 0xFFFF0000)
549 bool wrapped = val < lo(*acc);
550 u32 newacc = hi(*acc) + val;
551
552 if (wrapped)
553 newacc += 65536;
554 ACCESS_ONCE(*acc) = newacc;
555}
556
Jingoo Han4188e7d2013-08-05 18:02:02 +0900557static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530558 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559{
560 if (!BEx_chip(adapter))
561 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
562 else
563 /* below erx HW counter can actually wrap around after
564 * 65535. Driver accumulates a 32-bit value
565 */
566 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
567 (u16)erx_stat);
568}
569
/* Parse the latest FW stats response into adapter->drv_stats, selecting
 * the decoder that matches the chip family, and update the per-RX-queue
 * ERX drop counters for non-Lancer chips.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
595
Sathya Perlaab1594e2011-07-25 19:10:15 +0000596static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530597 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700598{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000600 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700601 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000602 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000603 u64 pkts, bytes;
604 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700605 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700606
Sathya Perla3abcded2010-10-03 22:12:27 -0700607 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000608 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530609
Sathya Perlaab1594e2011-07-25 19:10:15 +0000610 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700611 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000612 pkts = rx_stats(rxo)->rx_pkts;
613 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700614 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000615 stats->rx_packets += pkts;
616 stats->rx_bytes += bytes;
617 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
618 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
619 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700620 }
621
Sathya Perla3c8def92011-06-12 20:01:58 +0000622 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000623 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530624
Sathya Perlaab1594e2011-07-25 19:10:15 +0000625 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700626 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000627 pkts = tx_stats(txo)->tx_pkts;
628 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700629 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000630 stats->tx_packets += pkts;
631 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000632 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700633
634 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000635 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000636 drvs->rx_alignment_symbol_errors +
637 drvs->rx_in_range_errors +
638 drvs->rx_out_range_errors +
639 drvs->rx_frame_too_long +
640 drvs->rx_dropped_too_small +
641 drvs->rx_dropped_too_short +
642 drvs->rx_dropped_header_too_small +
643 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000644 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700645
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700646 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000647 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000648 drvs->rx_out_range_errors +
649 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000650
Sathya Perlaab1594e2011-07-25 19:10:15 +0000651 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652
653 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000654 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000655
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700656 /* receiver fifo overrun */
657 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000658 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000659 drvs->rx_input_fifo_overflow_drop +
660 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000661 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700662}
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666 struct net_device *netdev = adapter->netdev;
667
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000669 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000672
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530673 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000674 netif_carrier_on(netdev);
675 else
676 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200677
678 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500681static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700682{
Sathya Perla3c8def92011-06-12 20:01:58 +0000683 struct be_tx_stats *stats = tx_stats(txo);
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530684 u64 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
Sathya Perla3c8def92011-06-12 20:01:58 +0000685
Sathya Perlaab1594e2011-07-25 19:10:15 +0000686 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000687 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500688 stats->tx_bytes += skb->len;
Sriharsha Basavapatna8670f2a2015-07-29 19:35:32 +0530689 stats->tx_pkts += tx_pkts;
690 if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
691 stats->tx_vxlan_offload_pkts += tx_pkts;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000692 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700693}
694
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500695/* Returns number of WRBs needed for the skb */
696static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500698 /* +1 for the header wrb */
699 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500704 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
705 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
706 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
707 wrb->rsvd0 = 0;
708}
709
710/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
711 * to avoid the swap and shift/mask operations in wrb_fill().
712 */
713static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
714{
715 wrb->frag_pa_hi = 0;
716 wrb->frag_pa_lo = 0;
717 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000718 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700719}
720
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000721static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530722 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000723{
724 u8 vlan_prio;
725 u16 vlan_tag;
726
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100727 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000728 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
729 /* If vlan priority provided by OS is NOT in available bmap */
730 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
731 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
Sathya Perlafdf81bf2015-12-30 01:29:01 -0500732 adapter->recommended_prio_bits;
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000733
734 return vlan_tag;
735}
736
Sathya Perlac9c47142014-03-27 10:46:19 +0530737/* Used only for IP tunnel packets */
738static u16 skb_inner_ip_proto(struct sk_buff *skb)
739{
740 return (inner_ip_hdr(skb)->version == 4) ?
741 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
742}
743
744static u16 skb_ip_proto(struct sk_buff *skb)
745{
746 return (ip_hdr(skb)->version == 4) ?
747 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
748}
749
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530750static inline bool be_is_txq_full(struct be_tx_obj *txo)
751{
752 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
753}
754
755static inline bool be_can_txq_wake(struct be_tx_obj *txo)
756{
757 return atomic_read(&txo->q.used) < txo->q.len / 2;
758}
759
760static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
761{
762 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
763}
764
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530765static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
766 struct sk_buff *skb,
767 struct be_wrb_params *wrb_params)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700768{
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530769 u16 proto;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700770
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000771 if (skb_is_gso(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530772 BE_WRB_F_SET(wrb_params->features, LSO, 1);
773 wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000774 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530775 BE_WRB_F_SET(wrb_params->features, LSO6, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700776 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530777 if (skb->encapsulation) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530778 BE_WRB_F_SET(wrb_params->features, IPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530779 proto = skb_inner_ip_proto(skb);
780 } else {
781 proto = skb_ip_proto(skb);
782 }
783 if (proto == IPPROTO_TCP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530784 BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530785 else if (proto == IPPROTO_UDP)
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530786 BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700787 }
788
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100789 if (skb_vlan_tag_present(skb)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530790 BE_WRB_F_SET(wrb_params->features, VLAN, 1);
791 wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700792 }
793
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530794 BE_WRB_F_SET(wrb_params->features, CRC, 1);
795}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500796
/* Populate the per-packet header WRB from @wrb_params. The header WRB
 * carries the offload flags, VLAN tag and total frame length for the
 * entire TX request (all fragment WRBs that follow it).
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* TSO (LSO) settings */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	/* total WRB count and frame length of this request */
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt bit routes a copy of the frame to the BMC (OS2BMC path) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
833
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000834static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530835 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000836{
837 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000839
Sathya Perla7101e112010-03-22 20:41:12 +0000840
Sathya Perlaf986afc2015-02-06 08:18:43 -0500841 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
842 (u64)le32_to_cpu(wrb->frag_pa_lo);
843 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000844 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500845 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000846 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500847 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000848 }
849}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530851/* Grab a WRB header for xmit */
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530852static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853{
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530854 u32 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530856 queue_head_inc(&txo->q);
857 return head;
858}
859
860/* Set up the WRB header for xmit */
861static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
862 struct be_tx_obj *txo,
863 struct be_wrb_params *wrb_params,
864 struct sk_buff *skb, u16 head)
865{
866 u32 num_frags = skb_wrb_cnt(skb);
867 struct be_queue_info *txq = &txo->q;
868 struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
869
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530870 wrb_fill_hdr(adapter, hdr, wrb_params, skb);
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500871 be_dws_cpu_to_le(hdr, sizeof(*hdr));
872
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500873 BUG_ON(txo->sent_skb_list[head]);
874 txo->sent_skb_list[head] = skb;
875 txo->last_req_hdr = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530876 atomic_add(num_frags, &txq->used);
877 txo->last_req_wrb_cnt = num_frags;
878 txo->pend_wrb_cnt += num_frags;
879}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700880
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530881/* Setup a WRB fragment (buffer descriptor) for xmit */
882static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
883 int len)
884{
885 struct be_eth_wrb *wrb;
886 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700887
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530888 wrb = queue_head_node(txq);
889 wrb_fill(wrb, busaddr, len);
890 queue_head_inc(txq);
891}
892
893/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
894 * was invoked. The producer index is restored to the previous packet and the
895 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
896 */
897static void be_xmit_restore(struct be_adapter *adapter,
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530898 struct be_tx_obj *txo, u32 head, bool map_single,
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530899 u32 copied)
900{
901 struct device *dev;
902 struct be_eth_wrb *wrb;
903 struct be_queue_info *txq = &txo->q;
904
905 dev = &adapter->pdev->dev;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500906 txq->head = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530907
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500908 /* skip the first wrb (hdr); it's not mapped */
909 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000910 while (copied) {
911 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000912 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000913 map_single = false;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500914 copied -= le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000915 queue_head_inc(txq);
916 }
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530917
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500918 txq->head = head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530919}
920
921/* Enqueue the given packet for transmit. This routine allocates WRBs for the
922 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
923 * of WRBs used up by the packet.
924 */
925static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
926 struct sk_buff *skb,
927 struct be_wrb_params *wrb_params)
928{
929 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
930 struct device *dev = &adapter->pdev->dev;
931 struct be_queue_info *txq = &txo->q;
932 bool map_single = false;
ajit.khaparde@broadcom.comb0fd2eb2016-02-23 00:33:48 +0530933 u32 head = txq->head;
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530934 dma_addr_t busaddr;
935 int len;
936
937 head = be_tx_get_wrb_hdr(txo);
938
939 if (skb->len > skb->data_len) {
940 len = skb_headlen(skb);
941
942 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
943 if (dma_mapping_error(dev, busaddr))
944 goto dma_err;
945 map_single = true;
946 be_tx_setup_wrb_frag(txo, busaddr, len);
947 copied += len;
948 }
949
950 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
951 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
952 len = skb_frag_size(frag);
953
954 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
955 if (dma_mapping_error(dev, busaddr))
956 goto dma_err;
957 be_tx_setup_wrb_frag(txo, busaddr, len);
958 copied += len;
959 }
960
961 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
962
963 be_tx_stats_update(txo, skb);
964 return wrb_cnt;
965
966dma_err:
967 adapter->drv_stats.dma_map_errors++;
968 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000969 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700970}
971
/* Nonzero once the QnQ async event has been received from FW */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
976
Somnath Kotur93040ae2012-06-26 22:32:10 +0000977static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000978 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530979 struct be_wrb_params
980 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000981{
982 u16 vlan_tag = 0;
983
984 skb = skb_share_check(skb, GFP_ATOMIC);
985 if (unlikely(!skb))
986 return skb;
987
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100988 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000989 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530990
991 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
992 if (!vlan_tag)
993 vlan_tag = adapter->pvid;
994 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
995 * skip VLAN insertion
996 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530997 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530998 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000999
1000 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +01001001 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1002 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001003 if (unlikely(!skb))
1004 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001005 skb->vlan_tci = 0;
1006 }
1007
1008 /* Insert the outer VLAN, if any */
1009 if (adapter->qnq_vid) {
1010 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001011 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1012 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001013 if (unlikely(!skb))
1014 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301015 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001016 }
1017
Somnath Kotur93040ae2012-06-26 22:32:10 +00001018 return skb;
1019}
1020
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001021static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1022{
1023 struct ethhdr *eh = (struct ethhdr *)skb->data;
1024 u16 offset = ETH_HLEN;
1025
1026 if (eh->h_proto == htons(ETH_P_IPV6)) {
1027 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1028
1029 offset += sizeof(struct ipv6hdr);
1030 if (ip6h->nexthdr != NEXTHDR_TCP &&
1031 ip6h->nexthdr != NEXTHDR_UDP) {
1032 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301033 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001034
1035 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1036 if (ehdr->hdrlen == 0xff)
1037 return true;
1038 }
1039 }
1040 return false;
1041}
1042
1043static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1044{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001045 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001046}
1047
Sathya Perla748b5392014-05-09 13:29:13 +05301048static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001049{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001050 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051}
1052
/* Apply BEx/Lancer TX errata workarounds to @skb: trim HW-padded short
 * IPv4 frames, inline the VLAN tag where HW tagging misbehaves, and
 * drop ipv6-ext-hdr pkts that would lock up the HW. Returns the
 * (possibly reallocated) skb, or NULL if it was dropped/consumed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* trim the frame back to the length the IP hdr claims */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* skb is freed here; NULL tells the caller it was dropped */
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1121
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1123 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301124 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301125{
Suresh Reddy8227e992015-10-12 03:47:19 -04001126 /* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1127 * packets that are 32b or less may cause a transmit stall
1128 * on that port. The workaround is to pad such packets
1129 * (len <= 32 bytes) to a minimum length of 36b.
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301130 */
Suresh Reddy8227e992015-10-12 03:47:19 -04001131 if (skb->len <= 32) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001132 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301133 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301134 }
1135
1136 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301137 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301138 if (!skb)
1139 return NULL;
1140 }
1141
1142 return skb;
1143}
1144
/* Ring the TX doorbell for all WRBs queued on @txo since the last
 * flush. The final request is made eventable, and on non-Lancer chips
 * an odd WRB count is padded out with a dummy WRB first.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* account the dummy in the last request's num_wrb field */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1168
/* OS2BMC related */

/* well-known UDP ports whose broadcast/multicast traffic the BMC may
 * want mirrored to it (see be_send_pkt_to_bmc())
 */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1	137
#define NET_BIOS_PORT2	138
#define DHCPV6_RAS_PORT	547

/* Note: the BMC_FILT_* bits are *pass-through* filters configured by
 * FW; "filt_enabled" helpers below test whether the corresponding
 * traffic class is allowed through to the BMC.
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

#define is_broadcast_packet(eh, adapter)	\
		(is_multicast_ether_addr(eh->h_dest) && \
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1222
/* Decide whether @*skb must also be delivered to the BMC (OS2BMC).
 * Only multicast/broadcast frames are candidates; the per-class
 * bmc_filt_mask bits select which classes pass through. May replace
 * *skb: for a BMC-bound frame the VLAN tag gets inlined into the pkt.
 * Returns true when the caller should enqueue a second copy with the
 * mgmt bit set.
 */
static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
			       struct sk_buff **skb)
{
	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
	bool os2bmc = false;

	if (!be_is_os2bmc_enabled(adapter))
		goto done;

	/* unicast frames are never mirrored to the BMC */
	if (!is_multicast_ether_addr(eh->h_dest))
		goto done;

	if (is_mc_allowed_on_bmc(adapter, eh) ||
	    is_bc_allowed_on_bmc(adapter, eh) ||
	    is_arp_allowed_on_bmc(adapter, (*skb))) {
		os2bmc = true;
		goto done;
	}

	/* IPv6 neighbour-discovery: RA/NA have dedicated filter bits */
	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *hdr = ipv6_hdr((*skb));
		u8 nexthdr = hdr->nexthdr;

		if (nexthdr == IPPROTO_ICMPV6) {
			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));

			switch (icmp6->icmp6_type) {
			case NDISC_ROUTER_ADVERTISEMENT:
				os2bmc = is_ipv6_ra_filt_enabled(adapter);
				goto done;
			case NDISC_NEIGHBOUR_ADVERTISEMENT:
				os2bmc = is_ipv6_na_filt_enabled(adapter);
				goto done;
			default:
				break;
			}
		}
	}

	/* DHCP / NetBIOS / DHCPv6-RAS are matched by UDP dest port */
	if (is_udp_pkt((*skb))) {
		struct udphdr *udp = udp_hdr((*skb));

		switch (ntohs(udp->dest)) {
		case DHCP_CLIENT_PORT:
			os2bmc = is_dhcp_client_filt_enabled(adapter);
			goto done;
		case DHCP_SERVER_PORT:
			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
			goto done;
		case NET_BIOS_PORT1:
		case NET_BIOS_PORT2:
			os2bmc = is_nbios_filt_enabled(adapter);
			goto done;
		case DHCPV6_RAS_PORT:
			os2bmc = is_ipv6_ras_filt_enabled(adapter);
			goto done;
		default:
			break;
		}
	}
done:
	/* For packets over a vlan, which are destined
	 * to BMC, asic expects the vlan to be inline in the packet.
	 */
	if (os2bmc)
		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);

	return os2bmc;
}
1292
/* ndo_start_xmit handler: apply HW workarounds, DMA-map and enqueue the
 * skb's WRBs, optionally enqueue a second BMC-bound copy, and ring the
 * doorbell unless more packets are expected (xmit_more). Always returns
 * NETDEV_TX_OK; undeliverable skbs are dropped and counted.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	/* may modify/reallocate the skb; NULL means it was dropped */
	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; queue state was restored */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);	/* extra ref for the 2nd enqueue */
	}

	/* stop the queue while there may not be room for another skb */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1343
1344static int be_change_mtu(struct net_device *netdev, int new_mtu)
1345{
1346 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301347 struct device *dev = &adapter->pdev->dev;
1348
1349 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1350 dev_info(dev, "MTU must be between %d and %d bytes\n",
1351 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001352 return -EINVAL;
1353 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301354
1355 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301356 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001357 netdev->mtu = new_mtu;
1358 return 0;
1359}
1360
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001361static inline bool be_in_all_promisc(struct be_adapter *adapter)
1362{
1363 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1364 BE_IF_FLAGS_ALL_PROMISCUOUS;
1365}
1366
/* Turn on HW VLAN-promiscuous mode (all VLANs passed up).
 * No-op if the cached flags say it is already enabled; the cached flag is
 * updated only when the FW command succeeds.
 * Returns 0 on success or the FW command status on failure.
 */
static int be_set_vlan_promisc(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
	if (!status) {
		dev_info(dev, "Enabled VLAN promiscuous mode\n");
		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
	} else {
		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
	}
	return status;
}
1384
1385static int be_clear_vlan_promisc(struct be_adapter *adapter)
1386{
1387 struct device *dev = &adapter->pdev->dev;
1388 int status;
1389
1390 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1391 if (!status) {
1392 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1393 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1394 }
1395 return status;
1396}
1397
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filters programmed fine; promiscuous mode no longer needed */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1433
/* ndo_vlan_rx_add_vid handler: record the new VID in adapter->vids and
 * re-program the HW VLAN table via be_vid_config(). On failure the
 * cached bit and counter are rolled back so driver state matches HW.
 */
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return status;

	/* Already tracked: nothing to program */
	if (test_bit(vid, adapter->vids))
		return status;

	set_bit(vid, adapter->vids);
	adapter->vlans_added++;

	status = be_vid_config(adapter);
	if (status) {
		/* Roll back cached state on FW failure */
		adapter->vlans_added--;
		clear_bit(vid, adapter->vids);
	}

	return status;
}
1457
/* ndo_vlan_rx_kill_vid handler: drop the VID from the cached table and
 * re-program the HW VLAN filters. VID 0 on Lancer is never filtered, so
 * removal is a no-op there.
 */
static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Packets with VID 0 are always received by Lancer by default */
	if (lancer_chip(adapter) && vid == 0)
		return 0;

	/* Not tracked: nothing to remove */
	if (!test_bit(vid, adapter->vids))
		return 0;

	clear_bit(vid, adapter->vids);
	adapter->vlans_added--;

	return be_vid_config(adapter);
}
1474
/* Disable full (unicast + multicast) promiscuous mode in HW.
 * NOTE: the FW status is ignored; the cached flags are cleared
 * unconditionally.
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1480
/* Enable full (unicast + multicast) promiscuous mode in HW.
 * NOTE: the FW status is ignored; the cached flags are set
 * unconditionally.
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1486
/* Enable multicast-promiscuous mode; no-op if the cached flags say it is
 * already on. Unlike the all-promisc helpers, the cached flag is updated
 * only when the FW command succeeds.
 */
static void be_set_mc_promisc(struct be_adapter *adapter)
{
	int status;

	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
		return;

	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
	if (!status)
		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
}
1498
1499static void be_set_mc_list(struct be_adapter *adapter)
1500{
1501 int status;
1502
1503 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1504 if (!status)
1505 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1506 else
1507 be_set_mc_promisc(adapter);
1508}
1509
/* Re-program the HW unicast MAC filters from the netdev's UC list.
 * All previously-added secondary MACs (pmac slots 1..uc_macs) are deleted
 * first; if the requested list exceeds what the interface supports, fall
 * back to full promiscuous mode instead.
 * NOTE: the loop below both walks and zeroes adapter->uc_macs, so the
 * delete loop and the re-add loop are order-sensitive.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1530
1531static void be_clear_uc_list(struct be_adapter *adapter)
1532{
1533 int i;
1534
1535 for (i = 1; i < (adapter->uc_macs + 1); i++)
1536 be_cmd_pmac_del(adapter, adapter->if_handle,
1537 adapter->pmac_id[i], 0);
1538 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301539}
1540
/* ndo_set_rx_mode handler: bring the HW RX filtering (promiscuous,
 * unicast, multicast, VLAN) in sync with the netdev flags and address
 * lists. Called by the stack whenever the RX mode changes.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Re-program VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Only re-sync the UC filters if the list size actually changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1569
/* ndo_set_vf_mac handler: program @mac as the VF's MAC address.
 * On BEx chips this is a pmac delete + add; on newer chips a single
 * set_mac command. The cached vf_cfg->mac_addr is updated only on
 * success. Returns -EPERM without SR-IOV, -EINVAL for a bad MAC or VF
 * index, else the translated FW status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1609
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001610static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301611 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001612{
1613 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001614 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001615
Sathya Perla11ac75e2011-12-13 00:58:50 +00001616 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001617 return -EPERM;
1618
Sathya Perla11ac75e2011-12-13 00:58:50 +00001619 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001620 return -EINVAL;
1621
1622 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001623 vi->max_tx_rate = vf_cfg->tx_rate;
1624 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001625 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1626 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001627 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301628 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001629 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001630
1631 return 0;
1632}
1633
/* Enable Transparent VLAN Tagging (TVT) on a VF so HW tags the VF's
 * traffic with @vlan. Any VLAN filters the VF had programmed are cleared
 * and its FILTMGMT privilege is revoked so it cannot add new filters
 * while TVT is active. Returns 0 or the first fatal FW status.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1662
/* Disable Transparent VLAN Tagging on a VF and restore its ability to
 * program its own VLAN filters (re-grant FILTMGMT privilege). The guest
 * must bounce its interface for the change to fully take effect.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1689
/* ndo_set_vf_vlan handler: a non-zero @vlan/@qos enables transparent
 * VLAN tagging with the combined tag; 0/0 disables it. The tag is cached
 * in vf_cfg only on success. Returns -EPERM without SR-IOV, -EINVAL for
 * out-of-range vf/vlan/qos, else the translated FW status.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Fold the priority bits into the tag as HW expects */
		vlan |= qos << VLAN_PRIO_SHIFT;
		status = be_set_vf_tvt(adapter, vf, vlan);
	} else {
		status = be_clear_vf_tvt(adapter, vf);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
			status);
		return be_cmd_status(status);
	}

	vf_cfg->vlan_tag = vlan;
	return 0;
}
1719
/* ndo_set_vf_rate handler: set the VF's max TX rate via the QOS config
 * command. min_tx_rate is not supported (-EINVAL if non-zero). A
 * max_tx_rate of 0 removes the limit; otherwise it must lie in
 * [100, link_speed] Mbps, and on Skyhawk be a multiple of 1% of link
 * speed. The rate is cached only on success.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 = no limit: skip all link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301781
/* ndo_set_vf_link_state handler: push the requested logical link state
 * to FW and cache it in vf_cfg on success. Returns -EPERM without
 * SR-IOV, -EINVAL for a bad VF index, else the translated FW status.
 */
static int be_set_vf_link_state(struct net_device *netdev, int vf,
				int link_state)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Link state change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	adapter->vf_cfg[vf].plink_tracking = link_state;

	return 0;
}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001805
/* ndo_set_vf_spoofchk handler: enable/disable MAC spoof checking for a
 * VF via the hsw_config command. Not supported on BEx chips
 * (-EOPNOTSUPP). No-op when the requested state matches the cache; the
 * cache is updated only on FW success.
 */
static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u8 spoofchk;
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (BEx_chip(adapter))
		return -EOPNOTSUPP;

	if (enable == vf_cfg->spoofchk)
		return 0;

	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;

	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
				       0, spoofchk);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Spoofchk change on VF %d failed: %#x\n", vf, status);
		return be_cmd_status(status);
	}

	vf_cfg->spoofchk = enable;
	return 0;
}
1838
/* Snapshot the packet counters and timestamp that the next adaptive
 * interrupt-coalescing (AIC) rate calculation will diff against.
 */
static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
			  ulong now)
{
	aic->rx_pkts_prev = rx_pkts;
	aic->tx_reqs_prev = tx_pkts;
	aic->jiffies = now;
}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001846
/* Compute a new EQ-delay (interrupt coalescing) value for @eqo from the
 * aggregate RX/TX packet rate seen on its queues since the last sample,
 * clamped to [aic->min_eqd, aic->max_eqd]. When adaptive mode is off the
 * user-set static et_eqd is returned instead.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* Reset the sample clock so re-enabling starts fresh */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum per-queue counters under the u64_stats retry loop so a
	 * concurrent writer cannot give us a torn 64-bit read
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)	/* too soon to compute a rate; keep previous */
		return aic->prev_eqd;

	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;	/* low rate: don't coalesce at all */
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1907
/* For Skyhawk-R only */
/* Map the current EQ delay onto one of the R2I delay-multiplier
 * encodings (larger delay -> larger multiplier). Returns 0 (ENC_0) when
 * adaptive coalescing is off. Re-uses prev_eqd when the last sample is
 * under 1 ms old to avoid recomputing on every call.
 */
static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
	ulong now = jiffies;
	int eqd;
	u32 mult_enc;

	if (!aic->enable)
		return 0;

	if (jiffies_to_msecs(now - aic->jiffies) < 1)
		eqd = aic->prev_eqd;
	else
		eqd = be_get_new_eqd(eqo);

	if (eqd > 100)
		mult_enc = R2I_DLY_ENC_1;
	else if (eqd > 60)
		mult_enc = R2I_DLY_ENC_2;
	else if (eqd > 20)
		mult_enc = R2I_DLY_ENC_3;
	else
		mult_enc = R2I_DLY_ENC_0;

	aic->prev_eqd = eqd;

	return mult_enc;
}
1938
/* Recompute the EQ delay for every event queue and push the values that
 * changed (or all of them when @force_update) to FW in a single
 * modify_eqd command.
 */
void be_eqd_update(struct be_adapter *adapter, bool force_update)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	int i, num = 0, eqd;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		eqd = be_get_new_eqd(eqo);
		if (force_update || eqd != aic->prev_eqd) {
			/* FW takes the delay as a 65/100 scaled multiplier */
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1960
/* Fold one RX completion into the per-RXQ 64-bit counters, inside the
 * u64_stats update section so readers never see torn values.
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->tunneled)
		stats->rx_vxlan_offload_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1978
Sathya Perla2e588f82011-03-11 02:49:26 +00001979static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001980{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001981 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301982 * Also ignore ipcksm for ipv6 pkts
1983 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001984 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301985 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001986}
1987
/* Pop the page_info at the RXQ tail for the frag just completed and make
 * its data visible to the CPU: a full DMA unmap of the (big) page on its
 * last frag, otherwise just a cache sync of this one frag. Advances the
 * queue tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u32 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2013
/* Throwaway the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
				struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	/* Release the page ref for every frag of this completion and
	 * scrub the page_info entry for reuse
	 */
	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
}
2027
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first rx_frag_size bytes are handled specially: tiny packets
 * (<= BE_HDR_LEN) are copied entirely into the skb linear area; larger
 * packets get only the Ethernet header copied, with the rest of the first
 * fragment attached as a page frag.  Remaining fragments are appended as
 * page frags, coalescing consecutive fragments that share a physical page
 * into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the header into the linear area; the payload of
		 * the first fragment stays in the page and is referenced as
		 * frag[0] starting right after the header.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if kept) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref taken
			 * at post time and just grow the current frag slot.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2102
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb available: count the drop and release the posted
		 * rx buffers this completion consumed.
		 */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach the received data (header copy + page frags) to the skb */
	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if the device offers RXCSUM and the
	 * completion flags indicate the checksum verification passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* Non-zero when the HW validated an inner (tunneled) checksum */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2138
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* Cannot build an skb: drop the frame and free its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j is u16; starting at -1 (i.e. 0xffff) is safe because the i == 0
	 * iteration always takes the j++ branch, wrapping j back to 0 before
	 * frags[j] is first used.
	 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: drop the extra page ref
			 * and extend the current frag slot instead.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for checksum-verified TCP frames */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* Non-zero when the HW validated an inner (tunneled) checksum */
	skb->csum_level = rxcp->tunneled;

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2195
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002196static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2197 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002198{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302199 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2200 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2201 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2202 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2203 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2204 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2205 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2206 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2207 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2208 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2209 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002210 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302211 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2212 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002213 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302214 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05302215 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302216 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002217}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002218
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002219static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2220 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00002221{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302222 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2223 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2224 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2225 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2226 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2227 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2228 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2229 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2230 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2231 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2232 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002233 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302234 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2235 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00002236 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05302237 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2238 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00002239}
2240
/* Fetch the next valid Rx completion from rxo's CQ, or NULL if none is
 * pending.  The parsed result lives in rxo->rxcp (single cached slot, so
 * it is overwritten by the next call).  The CQ entry's valid bit is
 * cleared and the tail advanced before returning.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: don't load the rest of the DMA'd completion before
	 * the valid bit has been observed set.
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* Non-Lancer chips report the tag byte-swapped */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the host unless it was explicitly
		 * configured as an interface vlan.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2285
Eric Dumazet1829b082011-03-01 05:48:12 +00002286static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002287{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002289
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002290 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002291 gfp |= __GFP_COMP;
2292 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002293}
2294
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Each big page is DMA-mapped once; successive RXQ entries reference
 * rx_frag_size-sized slices of it.  page_info->last_frag marks the final
 * slice of a page so the unmap side knows when to dma_unmap the whole page
 * (intermediate slices only record frag_dmaaddr for cpu-sync).  Posted
 * entries are notified to HW in chunks of at most MAX_NUM_POST_ERX_DB.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Stop early if we run into a slot that still holds a page
	 * (i.e. the ring wrapped around to un-consumed buffers).
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another slice of the current big page; take an
			 * extra page reference for this RXQ entry.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the rx descriptor with the fragment's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks the HW can accept */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2377
/* Fetch the next valid Tx completion from txo's CQ, or NULL if none is
 * pending.  The decoded status/end_index live in txo->txcp (single cached
 * slot).  The CQ entry's valid bit is cleared and the tail advanced.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Reset the valid bit so this entry is not re-consumed */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2398
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * every WRB's DMA buffer and freeing the skbs that were transmitted.
 * A non-NULL sent_skbs[] slot marks the header WRB that starts a request.
 * Returns the number of WRBs consumed so the caller can adjust txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;
	u16 num_wrbs = 0;
	u32 frag_index;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag after the hdr wrb maps the skb's linear
		 * area; unmap it only when that area is non-empty.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2433
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002434/* Return the number of events in the event queue */
2435static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002436{
2437 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002438 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002439
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002440 do {
2441 eqe = queue_tail_node(&eqo->q);
2442 if (eqe->evt == 0)
2443 break;
2444
2445 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002446 eqe->evt = 0;
2447 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002448 queue_tail_inc(&eqo->q);
2449 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002450
2451 return num;
2452}
2453
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002454/* Leaves the EQ is disarmed state */
2455static void be_eq_clean(struct be_eq_obj *eqo)
2456{
2457 int num = events_get(eqo);
2458
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002459 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002460}
2461
Kalesh AP99b44302015-08-05 03:27:49 -04002462/* Free posted rx buffers that were not used */
2463static void be_rxq_clean(struct be_rx_obj *rxo)
2464{
2465 struct be_queue_info *rxq = &rxo->q;
2466 struct be_rx_page_info *page_info;
2467
2468 while (atomic_read(&rxq->used) > 0) {
2469 page_info = get_rx_page_info(rxo);
2470 put_page(page_info->page);
2471 memset(page_info, 0, sizeof(*page_info));
2472 }
2473 BUG_ON(atomic_read(&rxq->used));
2474 rxq->tail = 0;
2475 rxq->head = 0;
2476}
2477
/* Drain rxo's completion queue during teardown, discarding all pending
 * frames, and wait for the HW flush completion (a compl with num_rcvd == 0)
 * on BE chips.  Leaves the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or on a detected HW error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2517
/* Reclaim all TX resources during teardown.  Phase 1 polls every TX CQ for
 * completions until HW has been silent for ~10ms (or a HW error is seen).
 * Phase 2 frees any WRBs that were enqueued but never notified to HW and
 * rewinds the TXQ indices accordingly.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	u32 end_idx, notified_idx;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2584
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002585static void be_evt_queues_destroy(struct be_adapter *adapter)
2586{
2587 struct be_eq_obj *eqo;
2588 int i;
2589
2590 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002591 if (eqo->q.created) {
2592 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002593 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302594 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302595 netif_napi_del(&eqo->napi);
Kalesh AP649886a2015-08-05 03:27:50 -04002596 free_cpumask_var(eqo->affinity_mask);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002597 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002598 be_queue_free(adapter, &eqo->q);
2599 }
2600}
2601
/* Create one event queue per interrupt vector (capped at cfg_num_qs),
 * allocating queue memory, creating the EQ in HW, and setting up the
 * per-EQ napi context and CPU affinity mask.
 * Returns 0 on success or a negative errno.  On failure, queues created
 * so far are left allocated; presumably the caller unwinds via
 * be_evt_queues_destroy() — NOTE(review): confirm against callers.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		/* Adaptive interrupt coalescing defaults */
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread EQs across CPUs local to the device's NUMA node */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
	}
	return 0;
}
2640
Sathya Perla5fb379e2009-06-18 00:02:59 +00002641static void be_mcc_queues_destroy(struct be_adapter *adapter)
2642{
2643 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002644
Sathya Perla8788fdc2009-07-27 22:52:03 +00002645 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002646 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002647 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002648 be_queue_free(adapter, q);
2649
Sathya Perla8788fdc2009-07-27 22:52:03 +00002650 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002651 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002652 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002653 be_queue_free(adapter, q);
2654}
2655
2656/* Must be called only after TX qs are created as MCC shares TX EQ */
2657static int be_mcc_queues_create(struct be_adapter *adapter)
2658{
2659 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002660
Sathya Perla8788fdc2009-07-27 22:52:03 +00002661 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002662 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302663 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002664 goto err;
2665
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002666 /* Use the default EQ for MCC completions */
2667 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002668 goto mcc_cq_free;
2669
Sathya Perla8788fdc2009-07-27 22:52:03 +00002670 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002671 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2672 goto mcc_cq_destroy;
2673
Sathya Perla8788fdc2009-07-27 22:52:03 +00002674 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002675 goto mcc_q_free;
2676
2677 return 0;
2678
2679mcc_q_free:
2680 be_queue_free(adapter, q);
2681mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002682 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002683mcc_cq_free:
2684 be_queue_free(adapter, cq);
2685err:
2686 return -1;
2687}
2688
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002689static void be_tx_queues_destroy(struct be_adapter *adapter)
2690{
2691 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002692 struct be_tx_obj *txo;
2693 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002694
Sathya Perla3c8def92011-06-12 20:01:58 +00002695 for_all_tx_queues(adapter, txo, i) {
2696 q = &txo->q;
2697 if (q->created)
2698 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2699 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002700
Sathya Perla3c8def92011-06-12 20:01:58 +00002701 q = &txo->cq;
2702 if (q->created)
2703 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2704 be_queue_free(adapter, q);
2705 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002706}
2707
/* Create the TX queues: for each TXQ, allocate and create its CQ (bound
 * to an EQ), allocate and create the TXQ itself, and program XPS so TX
 * traffic from the EQ's CPUs lands on this queue. Returns 0 or a negative
 * status; the caller unwinds via be_tx_queues_destroy() on failure.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer TX from the EQ's CPUs onto this queue */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2752
2753static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002754{
2755 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002756 struct be_rx_obj *rxo;
2757 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002758
Sathya Perla3abcded2010-10-03 22:12:27 -07002759 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002760 q = &rxo->cq;
2761 if (q->created)
2762 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2763 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002764 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002765}
2766
/* Decide how many RX queues to use (RSS rings = number of EQs, plus an
 * optional default RXQ, with a minimum of one queue overall) and create a
 * completion queue for each, distributing the CQs round-robin over the
 * EQs. Returns 0 or a negative status; caller unwinds on failure.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Spread RX CQs round-robin across the available EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2808
/* Legacy INTx interrupt handler (EQ0 only — see be_irq_register()).
 * Counts pending events, kicks NAPI if it isn't already running, and
 * tracks spurious interrupts so only repeated ones are reported IRQ_NONE.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the counted events without re-arming the EQ (NAPI re-arms) */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2840
/* Per-EQ MSI-x interrupt handler: notify the EQ without re-arming it
 * (same unarmed be_eq_notify() form as be_intx) and defer all work to NAPI.
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2849
Sathya Perla2e588f82011-03-11 02:49:26 +00002850static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002851{
Somnath Koture38b1702013-05-29 22:55:56 +00002852 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002853}
2854
/* Service up to @budget RX completions on @rxo's completion queue.
 * @polling is NAPI_POLLING or BUSY_POLLING; GRO is skipped while
 * busy-polling. When the RXQ runs below the refill watermark it is
 * replenished here (unless post_starved, which be_worker handles).
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2914
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302915static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302916{
2917 switch (status) {
2918 case BE_TX_COMP_HDR_PARSE_ERR:
2919 tx_stats(txo)->tx_hdr_parse_err++;
2920 break;
2921 case BE_TX_COMP_NDMA_ERR:
2922 tx_stats(txo)->tx_dma_err++;
2923 break;
2924 case BE_TX_COMP_ACL_ERR:
2925 tx_stats(txo)->tx_spoof_check_err++;
2926 break;
2927 }
2928}
2929
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302930static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302931{
2932 switch (status) {
2933 case LANCER_TX_COMP_LSO_ERR:
2934 tx_stats(txo)->tx_tso_err++;
2935 break;
2936 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2937 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2938 tx_stats(txo)->tx_spoof_check_err++;
2939 break;
2940 case LANCER_TX_COMP_QINQ_ERR:
2941 tx_stats(txo)->tx_qinq_err++;
2942 break;
2943 case LANCER_TX_COMP_PARITY_ERR:
2944 tx_stats(txo)->tx_internal_parity_err++;
2945 break;
2946 case LANCER_TX_COMP_DMA_ERR:
2947 tx_stats(txo)->tx_dma_err++;
2948 break;
2949 }
2950}
2951
/* Reap all pending TX completions on @txo: reclaim the wrbs each
 * completion covers, record per-chip error stats, release the reclaimed
 * wrbs from the queue's used count and wake the netdev subqueue @idx if
 * it was stopped for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002986
#ifdef CONFIG_NET_RX_BUSY_POLL
/* NAPI vs busy-poll arbitration: eqo->state (BE_EQ_NAPI / BE_EQ_POLL /
 * BE_EQ_IDLE plus the *_YIELD flags) is updated under eqo->lock so that
 * only one of the two contexts processes an EQ's RX queues at a time.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		/* busy-poll holds the EQ; record that NAPI yielded */
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		/* NAPI holds the EQ; record that busy-poll yielded */
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queueus.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Busy-poll compiled out: NAPI always wins the lock, busy-poll never does */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3086
/* NAPI poll handler for an EQ: reaps TX completions for all TXQs on this
 * EQ, processes RX completions if the NAPI lock is won over busy-poll,
 * and runs MCC processing on the MCC EQ. Re-arms the EQ only when less
 * than @budget RX work was done.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the RX rings: claim the full budget so
		 * NAPI re-polls instead of completing.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3135
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll entry point: processes a small RX batch (budget of 4) on the
 * first of this EQ's RX rings that yields work. Returns LL_FLUSH_BUSY if
 * NAPI currently owns the EQ, else the amount of work done.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3157
/* Check the adapter registers for unrecoverable errors (UEs).
 * Lancer: read the SLIPORT status/error registers; a FW-reset signature
 * is logged as an info message instead of an error.
 * BEx/Skyhawk: read the PCICFG UE status registers, mask out bits FW
 * marked ignorable, and log any remaining UE bits. BE_ERROR_UE is set
 * only for Lancer and Skyhawk (see the spurious-UE comment below for BE).
 * No-op if a HW error was already recorded.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Keep only the UE bits FW did not mask off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			/* Log a description for each set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3226
Sathya Perla8d56ff12009-11-22 22:02:26 +00003227static void be_msix_disable(struct be_adapter *adapter)
3228{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003229 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003230 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003231 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303232 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003233 }
3234}
3235
/* Enable MSI-x with between MIN_MSIX_VECTORS and the computed maximum
 * number of vectors; when RoCE is supported, half the granted vectors
 * are set aside for it. Returns 0 on success. On failure, returns the
 * pci error only for VFs (which cannot fall back to INTx); PFs return 0
 * and fall back to INTx in be_irq_register().
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3279
/* Return the MSI-x vector number assigned to @eqo */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3285
/* Request one IRQ per event queue (named "<netdev>-q<i>") and set each
 * vector's CPU affinity hint. On failure, frees the IRQs already
 * requested in reverse order and disables MSI-x; returns the request_irq
 * error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Free only the vectors that were successfully requested */
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3313
/* Register interrupt handlers: per-EQ MSI-x handlers when MSI-x is
 * enabled, otherwise a shared legacy INTx handler on EQ0. A VF fails
 * hard if MSI-x registration fails, since VFs have no INTx support.
 * Sets isr_registered on success; returns 0 or a negative status.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3341
3342static void be_irq_unregister(struct be_adapter *adapter)
3343{
3344 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003345 struct be_eq_obj *eqo;
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003346 int i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003347
3348 if (!adapter->isr_registered)
3349 return;
3350
3351 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003352 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00003353 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003354 goto done;
3355 }
3356
3357 /* MSIx */
Padmanabh Ratnakard658d982015-03-26 03:05:08 -04003358 for_all_evt_queues(adapter, eqo, i) {
3359 vec = be_msix_vec_get(adapter, eqo);
3360 irq_set_affinity_hint(vec, NULL);
3361 free_irq(vec, eqo);
3362 }
Sathya Perla3abcded2010-10-03 22:12:27 -07003363
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003364done:
3365 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003366}
3367
/* Destroy all RX queues and, if RSS was on, tell FW to turn it off.
 * Kept in this exact order: CQ drain / re-post (Lancer), FW RXQ destroy,
 * then final CQ + RXQ buffer cleanup.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}

	/* Disable RSS in FW if it was previously configured.
	 * NOTE(review): magic 128 is presumably RSS_INDIR_TABLE_LEN (used in
	 * be_rx_qs_create) -- confirm and use the named constant.
	 */
	if (rss->rss_flags) {
		rss->rss_flags = RSS_ENABLE_NONE;
		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				  128, rss->rss_hkey);
	}
}
3404
/* Remove the interface's RX filtering state on the close path: delete the
 * primary MAC, flush the unicast list and (Lancer only, see below) clear
 * the IFACE RX-filter flags in FW.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	/* Delete the primary (index 0) MAC programmed on this interface */
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 *	- VF's IFACE flags get cleared in the detach path
	 *	- IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW.  As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is * used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3431
/* ndo_stop handler: quiesce the interface in a fixed order -- disable RX
 * filters, NAPI/busy-poll, async MCC events, drain TX, destroy RX queues,
 * sync + clean each EQ, then release the IRQs. Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_disable_if_filters(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Make sure no in-flight interrupt handler still references an EQ
	 * before cleaning it.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3476
/* Allocate and create all RX queues in FW, program the RSS indirection
 * table and hash key (multi-RXQ case), and post the initial RX buffers.
 * Returns 0 on success or a FW/allocation error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	/* Host-side ring allocation for every RXQ */
	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Non-RSS default RXQ, when required or when no RSS queues exist */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queues
		 * across all RSS_INDIR_TABLE_LEN slots.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is enabled only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
					  RSS_ENABLE_UDP_IPV6;

		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
				       RSS_INDIR_TABLE_LEN, rss_key);
		if (rc) {
			rss->rss_flags = RSS_ENABLE_NONE;
			return rc;
		}

		/* Remember the key actually programmed into FW */
		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}


	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3547
Kalesh APbcc84142015-08-05 03:27:48 -04003548static int be_enable_if_filters(struct be_adapter *adapter)
3549{
3550 int status;
3551
3552 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3553 if (status)
3554 return status;
3555
3556 /* For BE3 VFs, the PF programs the initial MAC address */
3557 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3558 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3559 adapter->if_handle,
3560 &adapter->pmac_id[0], 0);
3561 if (status)
3562 return status;
3563 }
3564
3565 if (adapter->vlans_added)
3566 be_vid_config(adapter);
3567
3568 be_set_rx_mode(adapter->netdev);
3569
3570 return 0;
3571}
3572
/* ndo_open handler: create RX queues, enable filters and IRQs, arm all
 * CQs/EQs, enable NAPI, report link state and start the TX queues.
 * On any failure be_close() undoes the partial bring-up and -EIO is
 * returned (the specific error code is not propagated).
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm RX and TX completion queues */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN offload is supported on Skyhawk only; query current ports */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3624
/* Enable/disable Wake-on-LAN (magic packet).
 * enable=true: set the PCI PM control bits and program a zero MAC;
 * enable=false: program the netdev's current MAC. In both cases the
 * magic-WoL FW command is issued and PCI D3hot/D3cold wake is set to
 * match @enable. Returns 0 or a negative error.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem cmd;
	u8 mac[ETH_ALEN];
	int status;

	eth_zero_addr(mac);

	/* DMA-coherent buffer for the FW command payload */
	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
	if (!cmd.va)
		return -ENOMEM;

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
						PCICFG_PM_CONTROL_OFFSET,
						PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(dev, "Could not enable Wake-on-lan\n");
			goto err;
		}
	} else {
		ether_addr_copy(mac, adapter->netdev->dev_addr);
	}

	status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
	pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
	pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
err:
	dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
3658
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003659static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3660{
3661 u32 addr;
3662
3663 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3664
3665 mac[5] = (u8)(addr & 0xFF);
3666 mac[4] = (u8)((addr >> 8) & 0xFF);
3667 mac[3] = (u8)((addr >> 16) & 0xFF);
3668 /* Use the OUI from the current MAC address */
3669 memcpy(mac, adapter->netdev->dev_addr, 3);
3670}
3671
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003672/*
3673 * Generate a seed MAC address from the PF MAC Address using jhash.
3674 * MAC Address for VFs are assigned incrementally starting from the seed.
3675 * These addresses are programmed in the ASIC by the PF and the VF driver
3676 * queries for the MAC address during its probe.
3677 */
Sathya Perla4c876612013-02-03 20:30:11 +00003678static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003679{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003680 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003681 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003682 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003683 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003684
3685 be_vf_eth_addr_generate(adapter, mac);
3686
Sathya Perla11ac75e2011-12-13 00:58:50 +00003687 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303688 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003689 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003690 vf_cfg->if_handle,
3691 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303692 else
3693 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3694 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003695
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003696 if (status)
3697 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303698 "Mac address assignment failed for VF %d\n",
3699 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003700 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003701 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003702
3703 mac[5] += 1;
3704 }
3705 return status;
3706}
3707
Sathya Perla4c876612013-02-03 20:30:11 +00003708static int be_vfs_mac_query(struct be_adapter *adapter)
3709{
3710 int status, vf;
3711 u8 mac[ETH_ALEN];
3712 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003713
3714 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303715 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3716 mac, vf_cfg->if_handle,
3717 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003718 if (status)
3719 return status;
3720 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3721 }
3722 return 0;
3723}
3724
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003725static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003726{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003727 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003728 u32 vf;
3729
Sathya Perla257a3fe2013-06-14 15:54:51 +05303730 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003731 dev_warn(&adapter->pdev->dev,
3732 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003733 goto done;
3734 }
3735
Sathya Perlab4c1df92013-05-08 02:05:47 +00003736 pci_disable_sriov(adapter->pdev);
3737
Sathya Perla11ac75e2011-12-13 00:58:50 +00003738 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303739 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003740 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3741 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303742 else
3743 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3744 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003745
Sathya Perla11ac75e2011-12-13 00:58:50 +00003746 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3747 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003748done:
3749 kfree(adapter->vf_cfg);
3750 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303751 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003752}
3753
/* Destroy all adapter queue objects (MCC, RX CQs, TX, then event queues).
 * NOTE(review): the order looks deliberate -- EQs are destroyed last,
 * presumably because the other queues reference them; confirm before
 * reordering.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3761
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303762static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003763{
Sathya Perla191eb752012-02-23 18:50:13 +00003764 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3765 cancel_delayed_work_sync(&adapter->work);
3766 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3767 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303768}
3769
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003770static void be_cancel_err_detection(struct be_adapter *adapter)
3771{
3772 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3773 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3774 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3775 }
3776}
3777
#ifdef CONFIG_BE2NET_VXLAN
/* Undo all VxLAN offload state: convert the tunnel IFACE back to normal
 * mode in FW, clear the programmed VxLAN UDP port, and strip the
 * UDP-tunnel GSO feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	/* Port 0 tells FW to forget the previously-set VxLAN port */
	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303798
Vasundhara Volamf2858732015-03-04 00:44:33 -05003799static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3800{
3801 struct be_resources res = adapter->pool_res;
3802 u16 num_vf_qs = 1;
3803
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05303804 /* Distribute the queue resources among the PF and it's VFs
Vasundhara Volamf2858732015-03-04 00:44:33 -05003805 * Do not distribute queue resources in multi-channel configuration.
3806 */
3807 if (num_vfs && !be_is_mc(adapter)) {
Sriharsha Basavapatnaee9ad282016-02-03 09:49:19 +05303808 /* Divide the qpairs evenly among the VFs and the PF, capped
3809 * at VF-EQ-count. Any remainder qpairs belong to the PF.
3810 */
3811 num_vf_qs = min(SH_VF_MAX_NIC_EQS,
3812 res.max_rss_qs / (num_vfs + 1));
Vasundhara Volamf2858732015-03-04 00:44:33 -05003813
3814 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3815 * interfaces per port. Provide RSS on VFs, only if number
3816 * of VFs requested is less than MAX_RSS_IFACES limit.
3817 */
3818 if (num_vfs >= MAX_RSS_IFACES)
3819 num_vf_qs = 1;
3820 }
3821 return num_vf_qs;
3822}
3823
/* Full teardown counterpart of be_setup(): stop the worker, clear VFs,
 * rebalance SR-IOV resources in FW (Skyhawk PF, only while no VFs are
 * assigned), disable VxLAN offloads, destroy the interface and all
 * queues, and disable MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3860
/* Create a FW interface (IFACE) for every VF. Capability flags start
 * from a baseline set; on non-BE3 chips they are refined from the FW
 * resource profile when one exists. Returns 0 or the first FW error.
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* Enable only the basic RX-filter subset of the caps */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3898
/* Allocate the per-VF config array (adapter->vf_cfg) and mark every VF's
 * interface handle and MAC id as invalid (-1) until FW assigns real ones.
 * Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3915
/* Bring up SR-IOV VFs. If VFs already exist (old_vfs != 0, e.g. after a
 * PF re-probe with VFs still enabled), their IFACE ids and MACs are only
 * re-queried; otherwise IFACEs are created and MACs assigned, and
 * pci_enable_sriov() is called at the end. Per-VF privileges, QoS,
 * spoof-check and link state are configured either way.
 * On failure everything is undone via be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: just refresh our view of them */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3999
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304000/* Converting function_mode bits on BE3 to SH mc_type enums */
4001
4002static u8 be_convert_mc_type(u32 function_mode)
4003{
Suresh Reddy66064db2014-06-23 16:41:29 +05304004 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304005 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304006 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304007 return FLEX10;
4008 else if (function_mode & VNIC_MODE)
4009 return vNIC2;
4010 else if (function_mode & UMC_ENABLED)
4011 return UMC;
4012 else
4013 return MC_NONE;
4014}
4015
/* On BE2/BE3 FW does not suggest the supported limits; derive them here
 * based on chip type, multi-channel mode and function capabilities, and
 * fill them into @res.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	/* num_vfs is set only when SR-IOV VFs were requested */
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only on an RSS-capable, non-SRIOV physical function;
	 * otherwise max_rss_qs stays at its caller-initialized value
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx chips do not support a default RSS queue */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4086
/* Reset the adapter's soft state to sane defaults before (re)initializing
 * the function in be_setup().
 */
static void be_setup_init(struct be_adapter *adapter)
{
	adapter->vlan_prio_bmap = 0xff;
	adapter->phy.link_speed = -1;	/* -1: link speed not yet known */
	adapter->if_handle = -1;	/* no interface created yet */
	adapter->be3_native = false;
	adapter->if_flags = 0;
	adapter->phy_state = BE_UNKNOWN_PHY_STATE;
	/* PFs start with all cmd privileges; VFs with the minimum set */
	if (be_physfn(adapter))
		adapter->cmd_privileges = MAX_PRIVILEGES;
	else
		adapter->cmd_privileges = MIN_PRIVILEGES;
}
4100
/* Query the SRIOV (PF-pool) resource limits from FW and stash them in
 * adapter->pool_res; also recovers num_vfs when VFs survived a previous
 * driver unload. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
4132
/* Read the SRIOV configuration and, on Skyhawk, redistribute the PF-pool
 * resources between the PF and (future) VFs via SET_SRIOV_CONFIG.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the HW limit only when VFs are not already enabled */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4158
/* Populate adapter->res with the per-function resource limits (queues,
 * MACs, vlans): derived locally on BEx chips, queried from FW elsewhere.
 * Also sanitizes cfg_num_qs against HW and platform limits.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4209
/* Query the static configuration of the function from FW (controller
 * attributes, fw_cfg, WoL/port info, resource limits) and allocate the
 * pmac_id table sized to the discovered unicast-MAC limit.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* FAT dump is available only on non-Lancer physical functions */
	if (!lancer_chip(adapter) && be_physfn(adapter))
		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* one pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4254
/* If the netdev does not yet have a MAC address, read the permanent MAC
 * from FW and install it as both dev_addr and perm_addr.
 */
static int be_mac_setup(struct be_adapter *adapter)
{
	u8 mac[ETH_ALEN];
	int status;

	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
		status = be_cmd_get_perm_mac(adapter, mac);
		if (status)
			return status;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	return 0;
}
4271
/* Schedule the periodic (1 second) housekeeping worker and mark it as
 * scheduled so it can be cancelled symmetrically later.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4277
/* Schedule the error-detection worker after @delay milliseconds and flag
 * it as scheduled.
 */
static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(delay));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4284
/* Create all queues (EQs, TXQs, RX CQs, MCC queues) in dependency order
 * and publish the real RX/TX queue counts to the stack. On any failure
 * the caller is expected to tear down via be_clear()/be_clear_queues().
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	/* EQs first: completion queues below attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	/* NOTE(review): netif_set_real_num_*_queues() must be called under
	 * rtnl_lock; be_setup() takes it around this function.
	 */
	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4319
/* Create the FW interface object for this function, storing its handle in
 * adapter->if_handle. RSS flags are dropped when only one queue is
 * configured; filter flags are enabled later in be_open().
 */
static int be_if_create(struct be_adapter *adapter)
{
	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	u32 cap_flags = be_if_cap_flags(adapter);
	int status;

	/* RSS is pointless with a single queue pair */
	if (adapter->cfg_num_qs == 1)
		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);

	en_flags &= cap_flags;
	/* will enable all the needed filter flags in be_open() */
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);

	return status;
}
4336
/* Tear down and re-create the interface and all queues so that a changed
 * queue configuration takes effect; restores the running state afterwards.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);
	/* The interface must be destroyed before it can be re-created with
	 * (possibly) different RSS capability flags in be_if_create()
	 */
	status = be_cmd_if_destroy(adapter, adapter->if_handle, 0);
	if (status)
		return status;

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_if_create(adapter);
	if (status)
		return status;

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4379
/* Parse the leading major number out of a FW version string such as
 * "4.2.247.5". Returns 0 when the string does not begin with a decimal
 * number.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4390
Sathya Perlaf962f842015-02-23 04:20:16 -05004391/* If any VFs are already enabled don't FLR the PF */
4392static bool be_reset_required(struct be_adapter *adapter)
4393{
4394 return pci_num_vf(adapter->pdev) ? false : true;
4395}
4396
4397/* Wait for the FW to be ready and perform the required initialization */
4398static int be_func_init(struct be_adapter *adapter)
4399{
4400 int status;
4401
4402 status = be_fw_wait_ready(adapter);
4403 if (status)
4404 return status;
4405
4406 if (be_reset_required(adapter)) {
4407 status = be_cmd_reset_function(adapter);
4408 if (status)
4409 return status;
4410
4411 /* Wait for interrupts to quiesce after an FLR */
4412 msleep(100);
4413
4414 /* We can clear all errors when function reset succeeds */
Venkata Duvvuru954f6822015-05-13 13:00:13 +05304415 be_clear_error(adapter, BE_CLEAR_ALL);
Sathya Perlaf962f842015-02-23 04:20:16 -05004416 }
4417
4418 /* Tell FW we're ready to fire cmds */
4419 status = be_cmd_fw_init(adapter);
4420 if (status)
4421 return status;
4422
4423 /* Allow interrupts for other ULPs running on NIC function */
4424 be_intr_set(adapter, true);
4425
4426 return 0;
4427}
4428
/* Full bring-up of the NIC function: FW handshake, config/resource
 * discovery, MSI-X, interface and queue creation, MAC/flow-control setup,
 * VF provisioning and worker scheduling. On failure everything created so
 * far is torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* invoke this cmd first to get pf_num and vf_num which are needed
	 * for issuing profile related cmds
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, NULL);
		if (status)
			return status;
	}

	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	status = be_if_create(adapter);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 FW older than 4.0 has known interrupt problems; warn only */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If setting the requested flow-control fails, re-read what the HW
	 * actually applied so our cached values stay accurate
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4517
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: ring the doorbell of every event queue and kick its NAPI
 * context so RX/TX completions are serviced without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4531
/* Flash the firmware image named @fw_file onto the adapter. Flashing is
 * allowed only while the interface is up. On success the cached FW
 * version string is refreshed.
 */
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
	const struct firmware *fw;
	int status;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -ENETDOWN;
	}

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	if (lancer_chip(adapter))
		status = lancer_fw_download(adapter, fw);
	else
		status = be_fw_download(adapter, fw);

	if (!status)
		be_cmd_get_fw_ver(adapter);

fw_exit:
	/* harmless on the request_firmware() failure path: *fw is NULL then
	 * and release_firmware(NULL) is a no-op
	 */
	release_firmware(fw);
	return status;
}
4561
/* ndo_bridge_setlink: program the eSwitch port-forwarding mode (VEB or
 * VEPA) from the IFLA_BRIDGE_MODE netlink attribute. Only supported when
 * SR-IOV is enabled; BE3 additionally rejects VEPA.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
			return -EOPNOTSUPP;

		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* only the first IFLA_BRIDGE_MODE attribute is honoured */
		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4611
/* ndo_bridge_getlink: report the current eSwitch port-forwarding mode
 * (VEB/VEPA) via the default bridge-getlink netlink fill helper. Returns 0
 * without filling anything when no mode can be reported.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
		if (!pci_sriov_get_totalvfs(adapter->pdev))
			return 0;
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;

		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
4642
#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
 *
 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
 * is expected to work across all types of IP tunnels once exported. Skyhawk
 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
 * those other tunnels are unexported on the fly through ndo_features_check().
 *
 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
 * adds more than one port, disable offloads and don't re-enable them again
 * until after all the tunnels are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* offloads exist only on Skyhawk, non-multichannel functions */
	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	/* same dport added again (e.g. for another address family):
	 * just count the alias
	 */
	if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
		adapter->vxlan_port_aliases++;
		return;
	}

	/* a second, different dport: HW supports only one — turn offloads
	 * off until the count drops back (see be_del_vxlan_port())
	 */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* export tunnel offload features only now that a port is active */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
4711
/* Undo be_add_vxlan_port(): drop an alias reference if one exists,
 * otherwise disable VxLAN offloads and decrement the port count.
 */
static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
		return;

	if (adapter->vxlan_port != port)
		goto done;

	if (adapter->vxlan_port_aliases) {
		adapter->vxlan_port_aliases--;
		return;
	}

	be_disable_vxlan_offloads(adapter);

	dev_info(&adapter->pdev->dev,
		 "Disabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
done:
	adapter->vxlan_port_count--;
}
Joe Stringer725d5482014-11-13 16:38:13 -08004736
/* ndo_features_check: strip checksum/GSO offload feature bits for
 * encapsulated packets that are not the single configured VxLAN tunnel,
 * since HW tunnel offloads are enabled for VxLAN only.
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		return features;
	}

	/* not UDP, or the inner frame/header layout doesn't match
	 * UDP + VxLAN header followed by an inner Ethernet frame
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304778
/* ndo_get_phys_port_id handler: builds a physical-port identifier from
 * the 1-based HBA port number (byte 0) followed by the controller serial
 * number words copied in reverse word order.
 * Returns 0 on success or -ENOSPC if the id cannot fit in ppid->id.
 */
static int be_get_phys_port_id(struct net_device *dev,
			       struct netdev_phys_item_id *ppid)
{
	/* +1 for the leading port-number byte */
	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
	struct be_adapter *adapter = netdev_priv(dev);
	u8 *id;

	if (MAX_PHYS_ITEM_ID_LEN < id_len)
		return -ENOSPC;

	ppid->id[0] = adapter->hba_port_num + 1;
	id = &ppid->id[1];
	/* Copy serial number words highest-index first */
	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);

	ppid->id_len = id_len;

	return 0;
}
4799
/* net_device_ops vector for be2net interfaces; hooked up to each netdev
 * in be_netdev_init().
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
	.ndo_get_phys_port_id = be_get_phys_port_id,
};
4832
/* Initialize feature flags, netdev ops and ethtool ops on a freshly
 * allocated net_device prior to registration.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable features (exposed via ethtool) */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently active features; VLAN RX strip/filter are enabled but
	 * not added to hw_features (not user-toggleable here).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4859
/* Quiesce the device: detach the netdev from the stack, close it if it
 * was running, then tear down adapter resources via be_clear(). Used by
 * suspend, EEH error handling and error recovery paths. The detach and
 * close are done under RTNL so the stack stops using the device first.
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
4872
Kalesh AP484d76f2015-02-23 04:20:14 -05004873static int be_resume(struct be_adapter *adapter)
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004874{
Kalesh APd0e1b312015-02-23 04:20:12 -05004875 struct net_device *netdev = adapter->netdev;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004876 int status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004877
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004878 status = be_setup(adapter);
4879 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05004880 return status;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004881
Kalesh APd0e1b312015-02-23 04:20:12 -05004882 if (netif_running(netdev)) {
4883 status = be_open(netdev);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004884 if (status)
Kalesh AP484d76f2015-02-23 04:20:14 -05004885 return status;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004886 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004887
Kalesh APd0e1b312015-02-23 04:20:12 -05004888 netif_device_attach(netdev);
4889
Kalesh AP484d76f2015-02-23 04:20:14 -05004890 return 0;
4891}
4892
/* Attempt to recover the adapter from a detected HW error: wait for the
 * FW to become ready, tear everything down, then bring it back up.
 * Returns 0 on success or a negative error code.
 */
static int be_err_recover(struct be_adapter *adapter)
{
	int status;

	/* Error recovery is supported only Lancer as of now */
	if (!lancer_chip(adapter))
		return -EIO;

	/* Wait for adapter to reach quiescent state before
	 * destroying queues
	 */
	status = be_fw_wait_ready(adapter);
	if (status)
		goto err;

	be_cleanup(adapter);

	status = be_resume(adapter);
	if (status)
		goto err;

	return 0;
err:
	return status;
}
4918
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004919static void be_err_detection_task(struct work_struct *work)
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004920{
4921 struct be_adapter *adapter =
Sathya Perlaeb7dd462015-02-23 04:20:11 -05004922 container_of(work, struct be_adapter,
4923 be_err_detection_work.work);
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304924 struct device *dev = &adapter->pdev->dev;
4925 int recovery_status;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304926 int delay = ERR_DETECTION_DELAY;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004927
4928 be_detect_error(adapter);
4929
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304930 if (be_check_error(adapter, BE_ERROR_HW))
4931 recovery_status = be_err_recover(adapter);
4932 else
4933 goto reschedule_task;
Kalesh APd0e1b312015-02-23 04:20:12 -05004934
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304935 if (!recovery_status) {
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304936 adapter->recovery_retries = 0;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304937 dev_info(dev, "Adapter recovery successful\n");
4938 goto reschedule_task;
4939 } else if (be_virtfn(adapter)) {
4940 /* For VFs, check if PF have allocated resources
4941 * every second.
4942 */
4943 dev_err(dev, "Re-trying adapter recovery\n");
4944 goto reschedule_task;
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304945 } else if (adapter->recovery_retries++ <
4946 MAX_ERR_RECOVERY_RETRY_COUNT) {
4947 /* In case of another error during recovery, it takes 30 sec
4948 * for adapter to come out of error. Retry error recovery after
4949 * this time interval.
4950 */
4951 dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
4952 delay = ERR_RECOVERY_RETRY_DELAY;
4953 goto reschedule_task;
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304954 } else {
4955 dev_err(dev, "Adapter recovery failed\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004956 }
4957
Padmanabh Ratnakar1babbad2016-02-03 09:49:21 +05304958 return;
4959reschedule_task:
Padmanabh Ratnakar972f37b2016-02-03 09:49:22 +05304960 be_schedule_err_detection(adapter, delay);
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00004961}
4962
/* Query the SFP module info from FW and log the port's PHY state and
 * module vendor details. The PHY_MISCONFIGURED flag is cleared
 * unconditionally so the message is emitted only once per event
 * (be_worker() calls this only while the flag is set).
 */
static void be_log_sfp_info(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_query_sfp_info(adapter);
	if (!status) {
		dev_err(&adapter->pdev->dev,
			"Port %c: %s Vendor: %s part no: %s",
			adapter->port_name,
			be_misconfig_evt_port_state[adapter->phy_state],
			adapter->phy.vendor_name,
			adapter->phy.vendor_pn);
	}
	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
}
4978
/* Periodic (1 s) housekeeping work: reaps MCC completions, kicks off
 * stats and die-temperature queries, replenishes starved RX queues,
 * updates EQ delays and logs SFP misconfiguration events. Always
 * reschedules itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die temperature is polled on the PF every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5027
Sathya Perla78fad34e2015-02-23 04:20:08 -05005028static void be_unmap_pci_bars(struct be_adapter *adapter)
5029{
5030 if (adapter->csr)
5031 pci_iounmap(adapter->pdev, adapter->csr);
5032 if (adapter->db)
5033 pci_iounmap(adapter->pdev, adapter->db);
5034}
5035
/* BAR number holding the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 on BE/Skyhawk PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5043
5044static int be_roce_map_pci_bars(struct be_adapter *adapter)
5045{
5046 if (skyhawk_chip(adapter)) {
5047 adapter->roce_db.size = 4096;
5048 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5049 db_bar(adapter));
5050 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5051 db_bar(adapter));
5052 }
5053 return 0;
5054}
5055
/* Map the PCI BARs needed by the driver (CSR on BEx PFs, doorbell on all,
 * PCICFG on Skyhawk/BEx) and record the RoCE doorbell region.
 * Returns 0 on success or -ENOMEM, unmapping anything already mapped.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* Derive SLI family and VF-ness from the SLI_INTF register */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR space (BAR 2) exists only on BEx PFs */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs access PCICFG through a doorbell-BAR offset */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
5098
5099static void be_drv_cleanup(struct be_adapter *adapter)
5100{
5101 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5102 struct device *dev = &adapter->pdev->dev;
5103
5104 if (mem->va)
5105 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5106
5107 mem = &adapter->rx_filter;
5108 if (mem->va)
5109 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5110
5111 mem = &adapter->stats_cmd;
5112 if (mem->va)
5113 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5114}
5115
/* Allocate and initialize various fields in be_adapter struct:
 * DMA-coherent command buffers (mailbox, rx-filter, stats), locks,
 * completion objects and the two delayed-work items. Returns 0 on
 * success or -ENOMEM, freeing anything allocated so far on failure.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the used region can be 16-aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is the 16-byte-aligned view into mbox_mem_alloced */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on the chip generation */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5186
/* PCI remove callback: tears down the adapter in the reverse order of
 * be_probe() — RoCE, interrupts, error-detection work, netdev, queues,
 * FW session, BAR mappings, DMA buffers, then the PCI device itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5216
Arnd Bergmann9a032592015-05-18 23:06:45 +02005217static ssize_t be_hwmon_show_temp(struct device *dev,
5218 struct device_attribute *dev_attr,
5219 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305220{
5221 struct be_adapter *adapter = dev_get_drvdata(dev);
5222
5223 /* Unit: millidegree Celsius */
5224 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5225 return -EIO;
5226 else
5227 return sprintf(buf, "%u\n",
5228 adapter->hwmon_info.be_on_die_temp * 1000);
5229}
5230
/* hwmon: expose the on-die temperature as a read-only temp1_input
 * attribute; be_hwmon_groups is passed to
 * devm_hwmon_device_register_with_groups() in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5240
Sathya Perlad3791422012-09-28 04:39:44 +00005241static char *mc_name(struct be_adapter *adapter)
5242{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305243 char *str = ""; /* default */
5244
5245 switch (adapter->mc_type) {
5246 case UMC:
5247 str = "UMC";
5248 break;
5249 case FLEX10:
5250 str = "FLEX10";
5251 break;
5252 case vNIC1:
5253 str = "vNIC-1";
5254 break;
5255 case nPAR:
5256 str = "nPAR";
5257 break;
5258 case UFP:
5259 str = "UFP";
5260 break;
5261 case vNIC2:
5262 str = "vNIC-2";
5263 break;
5264 default:
5265 str = "";
5266 }
5267
5268 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005269}
5270
/* "PF" or "VF" depending on the function type, for log messages. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5275
/* Map the PCI device ID to a marketing name (BE2/BE3/Lancer/Skyhawk
 * variants) for log messages.
 */
static inline char *nic_name(struct pci_dev *pdev)
{
	switch (pdev->device) {
	case OC_DEVICE_ID1:
		return OC_NAME;
	case OC_DEVICE_ID2:
		return OC_NAME_BE;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		return OC_NAME_LANCER;
	case BE_DEVICE_ID2:
		return BE3_NAME;
	case OC_DEVICE_ID5:
	case OC_DEVICE_ID6:
		return OC_NAME_SH;
	default:
		return BE_NAME;
	}
}
5295
/* PCI probe callback: enables the device, allocates the netdev/adapter,
 * sets the DMA mask, maps BARs, allocates driver resources, configures
 * the HW (be_setup), registers the netdev and starts the periodic
 * error-detection work. On any failure, unwinds in reverse order via
 * the goto labels. Returns 0 on success or a negative error code.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5391
/* PM suspend callback: arms wake-on-LAN if enabled, disables interrupts,
 * stops the error-detection work, quiesces the device and puts the PCI
 * device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5409
/* PM resume callback: re-enables the PCI device, restores config space,
 * brings the adapter back up, restarts error detection and disarms
 * wake-on-LAN. Returns 0 on success or a negative error code.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5432
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stops RoCE and the periodic works, detaches the
 * netdev and issues a function reset so the HW stops all DMA before the
 * system powers off or kexecs.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Probe may have failed before drvdata was set */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	be_cancel_err_detection(adapter);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5453
/* AER/EEH error_detected callback: quiesces the adapter on the first
 * notification and tells the PCI core whether a slot reset should be
 * attempted (NEED_RESET) or the device is lost (DISCONNECT).
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	be_roce_dev_remove(adapter);

	/* Only tear down once even if notified repeatedly */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5487
/* AER/EEH slot_reset callback: re-enables the PCI device after the slot
 * reset, waits for FW readiness and clears the driver's error state.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5513
/* EEH resume callback: final stage of PCI error recovery.  Brings the
 * adapter back into full operation via be_resume(), re-attaches the RoCE
 * function and re-arms the periodic error-detection task.  On failure it
 * can only log — the EEH core provides no error return from this stage.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	/* Snapshot the freshly-restored config space so later suspend/
	 * resume cycles start from a sane saved state.
	 */
	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5534
Vasundhara Volamace40af2015-03-04 00:44:34 -05005535static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5536{
5537 struct be_adapter *adapter = pci_get_drvdata(pdev);
5538 u16 num_vf_qs;
5539 int status;
5540
5541 if (!num_vfs)
5542 be_vf_clear(adapter);
5543
5544 adapter->num_vfs = num_vfs;
5545
5546 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5547 dev_warn(&pdev->dev,
5548 "Cannot disable VFs while they are assigned\n");
5549 return -EBUSY;
5550 }
5551
5552 /* When the HW is in SRIOV capable configuration, the PF-pool resources
5553 * are equally distributed across the max-number of VFs. The user may
5554 * request only a subset of the max-vfs to be enabled.
5555 * Based on num_vfs, redistribute the resources across num_vfs so that
5556 * each VF will have access to more number of resources.
5557 * This facility is not available in BE3 FW.
5558 * Also, this is done by FW in Lancer chip.
5559 */
5560 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
5561 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
5562 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
5563 adapter->num_vfs, num_vf_qs);
5564 if (status)
5565 dev_err(&pdev->dev,
5566 "Failed to optimize SR-IOV resources\n");
5567 }
5568
5569 status = be_get_resources(adapter);
5570 if (status)
5571 return be_cmd_status(status);
5572
5573 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
5574 rtnl_lock();
5575 status = be_update_queues(adapter);
5576 rtnl_unlock();
5577 if (status)
5578 return be_cmd_status(status);
5579
5580 if (adapter->num_vfs)
5581 status = be_vf_setup(adapter);
5582
5583 if (!status)
5584 return adapter->num_vfs;
5585
5586 return 0;
5587}
5588
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07005589static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00005590 .error_detected = be_eeh_err_detected,
5591 .slot_reset = be_eeh_reset,
5592 .resume = be_eeh_resume,
5593};
5594
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005595static struct pci_driver be_driver = {
5596 .name = DRV_NAME,
5597 .id_table = be_dev_ids,
5598 .probe = be_probe,
5599 .remove = be_remove,
5600 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05005601 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00005602 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05005603 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00005604 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005605};
5606
5607static int __init be_init_module(void)
5608{
Joe Perches8e95a202009-12-03 07:58:21 +00005609 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5610 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005611 printk(KERN_WARNING DRV_NAME
5612 " : Module param rx_frag_size must be 2048/4096/8192."
5613 " Using 2048\n");
5614 rx_frag_size = 2048;
5615 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005616
Vasundhara Volamace40af2015-03-04 00:44:34 -05005617 if (num_vfs > 0) {
5618 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
5619 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
5620 }
5621
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005622 return pci_register_driver(&be_driver);
5623}
5624module_init(be_init_module);
5625
/* Module exit point: unregister the PCI driver, which triggers
 * be_remove() for every bound adapter.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);