blob: c28e3bfdccd75def6aef2522a9aaf827e0d45828 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
/* Module identity reported via modinfo. */
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of each RX buffer fragment posted to the NIC; read-only at runtime */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
43
Benoit Taine9baa3c32014-08-08 15:56:03 +020044static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070046 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070047 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000051 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000052 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070053 { 0 }
54};
55MODULE_DEVICE_TABLE(pci, be_dev_ids);
Ajit Khaparde7c185272010-07-29 06:16:33 +000056/* UE Status Low CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070057static const char * const ue_status_low_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000058 "CEV",
59 "CTX",
60 "DBUF",
61 "ERX",
62 "Host",
63 "MPU",
64 "NDMA",
65 "PTC ",
66 "RDMA ",
67 "RXF ",
68 "RXIPS ",
69 "RXULP0 ",
70 "RXULP1 ",
71 "RXULP2 ",
72 "TIM ",
73 "TPOST ",
74 "TPRE ",
75 "TXIPS ",
76 "TXULP0 ",
77 "TXULP1 ",
78 "UC ",
79 "WDMA ",
80 "TXULP2 ",
81 "HOST1 ",
82 "P0_OB_LINK ",
83 "P1_OB_LINK ",
84 "HOST_GPIO ",
85 "MBOX ",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +053086 "ERX2 ",
87 "SPARE ",
88 "JTAG ",
89 "MPU_INTPEND "
Ajit Khaparde7c185272010-07-29 06:16:33 +000090};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
Ajit Khaparde7c185272010-07-29 06:16:33 +000092/* UE Status High CSR */
Joe Perches42c8b112011-07-09 02:56:56 -070093static const char * const ue_status_hi_desc[] = {
Ajit Khaparde7c185272010-07-29 06:16:33 +000094 "LPCMEMHOST",
95 "MGMT_MAC",
96 "PCS0ONLINE",
97 "MPU_IRAM",
98 "PCS1ONLINE",
99 "PCTL0",
100 "PCTL1",
101 "PMEM",
102 "RR",
103 "TXPB",
104 "RXPP",
105 "XAUI",
106 "TXP",
107 "ARM",
108 "IPC",
109 "HOST2",
110 "HOST3",
111 "HOST4",
112 "HOST5",
113 "HOST6",
114 "HOST7",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530115 "ECRC",
116 "Poison TLP",
Joe Perches42c8b112011-07-09 02:56:56 -0700117 "NETC",
Vasundhara Volam6bdf8f52014-07-17 16:20:25 +0530118 "PERIPH",
119 "LLTXULP",
120 "D2P",
121 "RCON",
122 "LDMA",
123 "LLTXP",
124 "LLTXPB",
Ajit Khaparde7c185272010-07-29 06:16:33 +0000125 "Unknown"
126};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
Somnath Kotur68c45a22013-03-14 02:42:07 +0000155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700156{
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000158
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530160 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000169
Sathya Perladb3ea782011-08-22 19:41:52 +0000170 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700172}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
Sathya Perla8788fdc2009-07-27 22:52:03 +0000190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700191{
192 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530193
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530194 if (be_check_error(adapter, BE_ERROR_HW))
195 return;
196
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197 val |= qid & DB_RQ_RING_ID_MASK;
198 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000199
200 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000201 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700202}
203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
205 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206{
207 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530208
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530209 if (be_check_error(adapter, BE_ERROR_HW))
210 return;
211
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000212 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000214
215 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000216 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700217}
218
Sathya Perla8788fdc2009-07-27 22:52:03 +0000219static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400220 bool arm, bool clear_int, u16 num_popped,
221 u32 eq_delay_mult_enc)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222{
223 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530224
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700225 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530226 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000227
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530228 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000229 return;
230
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700231 if (arm)
232 val |= 1 << DB_EQ_REARM_SHIFT;
233 if (clear_int)
234 val |= 1 << DB_EQ_CLR_SHIFT;
235 val |= 1 << DB_EQ_EVNT_SHIFT;
236 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Padmanabh Ratnakar20947772015-05-06 05:30:33 -0400237 val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000238 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700239}
240
Sathya Perla8788fdc2009-07-27 22:52:03 +0000241void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242{
243 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530244
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700245 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000246 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
247 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000248
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530249 if (be_check_error(adapter, BE_ERROR_HW))
Sathya Perlacf588472010-02-14 21:22:01 +0000250 return;
251
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 if (arm)
253 val |= 1 << DB_CQ_REARM_SHIFT;
254 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000255 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256}
257
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700258static int be_mac_addr_set(struct net_device *netdev, void *p)
259{
260 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530261 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700262 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530263 int status;
264 u8 mac[ETH_ALEN];
265 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700266
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000267 if (!is_valid_ether_addr(addr->sa_data))
268 return -EADDRNOTAVAIL;
269
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530270 /* Proceed further only if, User provided MAC is different
271 * from active MAC
272 */
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0;
275
Kalesh APbcc84142015-08-05 03:27:48 -0400276 /* if device is not running, copy MAC to netdev->dev_addr */
277 if (!netif_running(netdev))
278 goto done;
279
Sathya Perla5a712c12013-07-23 15:24:59 +0530280 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
281 * privilege or if PF did not provision the new MAC address.
282 * On BE3, this cmd will always fail if the VF doesn't have the
283 * FILTMGMT privilege. This failure is OK, only if the PF programmed
284 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000285 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530286 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
287 adapter->if_handle, &adapter->pmac_id[0], 0);
288 if (!status) {
289 curr_pmac_id = adapter->pmac_id[0];
290
291 /* Delete the old programmed MAC. This call may fail if the
292 * old MAC was already deleted by the PF driver.
293 */
294 if (adapter->pmac_id[0] != old_pmac_id)
295 be_cmd_pmac_del(adapter, adapter->if_handle,
296 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000297 }
298
Sathya Perla5a712c12013-07-23 15:24:59 +0530299 /* Decide if the new MAC is successfully activated only after
300 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000301 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530302 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
303 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000304 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000305 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306
Sathya Perla5a712c12013-07-23 15:24:59 +0530307 /* The MAC change did not happen, either due to lack of privilege
308 * or PF didn't pre-provision.
309 */
dingtianhong61d23e92013-12-30 15:40:43 +0800310 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530311 status = -EPERM;
312 goto err;
313 }
Kalesh APbcc84142015-08-05 03:27:48 -0400314done:
315 ether_addr_copy(netdev->dev_addr, addr->sa_data);
316 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000317 return 0;
318err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530319 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700320 return status;
321}
322
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323/* BE2 supports only v0 cmd */
324static void *hw_stats_from_cmd(struct be_adapter *adapter)
325{
326 if (BE2_chip(adapter)) {
327 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000331 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else {
335 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
336
337 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000338 }
339}
340
341/* BE2 supports only v0 cmd */
342static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
343{
344 if (BE2_chip(adapter)) {
345 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000349 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500352 } else {
353 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
354
355 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000356 }
357}
358
359static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000360{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000361 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
362 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
363 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000364 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000365 &rxf_stats->port[adapter->port_num];
366 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000367
Sathya Perlaac124ff2011-07-25 19:10:14 +0000368 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000369 drvs->rx_pause_frames = port_stats->rx_pause_frames;
370 drvs->rx_crc_errors = port_stats->rx_crc_errors;
371 drvs->rx_control_frames = port_stats->rx_control_frames;
372 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
373 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
374 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
375 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
376 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
377 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
378 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
379 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
380 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
381 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
382 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000383 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000384 drvs->rx_dropped_header_too_small =
385 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000386 drvs->rx_address_filtered =
387 port_stats->rx_address_filtered +
388 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000389 drvs->rx_alignment_symbol_errors =
390 port_stats->rx_alignment_symbol_errors;
391
392 drvs->tx_pauseframes = port_stats->tx_pauseframes;
393 drvs->tx_controlframes = port_stats->tx_controlframes;
394
395 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000397 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000398 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000400 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000401 drvs->forwarded_packets = rxf_stats->forwarded_packets;
402 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
404 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000405 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
406}
407
Sathya Perlaca34fe32012-11-06 17:48:56 +0000408static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000409{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000410 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
411 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
412 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000413 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000414 &rxf_stats->port[adapter->port_num];
415 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000416
Sathya Perlaac124ff2011-07-25 19:10:14 +0000417 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000418 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
419 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000420 drvs->rx_pause_frames = port_stats->rx_pause_frames;
421 drvs->rx_crc_errors = port_stats->rx_crc_errors;
422 drvs->rx_control_frames = port_stats->rx_control_frames;
423 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
424 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
425 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
426 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
427 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
428 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
429 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
430 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
431 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
432 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
433 drvs->rx_dropped_header_too_small =
434 port_stats->rx_dropped_header_too_small;
435 drvs->rx_input_fifo_overflow_drop =
436 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000437 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000438 drvs->rx_alignment_symbol_errors =
439 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000440 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000441 drvs->tx_pauseframes = port_stats->tx_pauseframes;
442 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000443 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000444 drvs->jabber_events = port_stats->jabber_events;
445 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000446 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000447 drvs->forwarded_packets = rxf_stats->forwarded_packets;
448 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000449 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
450 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000451 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
452}
453
Ajit Khaparde61000862013-10-03 16:16:33 -0500454static void populate_be_v2_stats(struct be_adapter *adapter)
455{
456 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
457 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
458 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
459 struct be_port_rxf_stats_v2 *port_stats =
460 &rxf_stats->port[adapter->port_num];
461 struct be_drv_stats *drvs = &adapter->drv_stats;
462
463 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
464 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
465 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
466 drvs->rx_pause_frames = port_stats->rx_pause_frames;
467 drvs->rx_crc_errors = port_stats->rx_crc_errors;
468 drvs->rx_control_frames = port_stats->rx_control_frames;
469 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
470 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
471 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
472 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
473 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
474 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
475 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
476 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
477 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
478 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
479 drvs->rx_dropped_header_too_small =
480 port_stats->rx_dropped_header_too_small;
481 drvs->rx_input_fifo_overflow_drop =
482 port_stats->rx_input_fifo_overflow_drop;
483 drvs->rx_address_filtered = port_stats->rx_address_filtered;
484 drvs->rx_alignment_symbol_errors =
485 port_stats->rx_alignment_symbol_errors;
486 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
487 drvs->tx_pauseframes = port_stats->tx_pauseframes;
488 drvs->tx_controlframes = port_stats->tx_controlframes;
489 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
490 drvs->jabber_events = port_stats->jabber_events;
491 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
492 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
493 drvs->forwarded_packets = rxf_stats->forwarded_packets;
494 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
495 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
496 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
497 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530498 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500499 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
500 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
501 drvs->rx_roce_frames = port_stats->roce_frames_received;
502 drvs->roce_drops_crc = port_stats->roce_drops_crc;
503 drvs->roce_drops_payload_len =
504 port_stats->roce_drops_payload_len;
505 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500506}
507
Selvin Xavier005d5692011-05-16 07:36:35 +0000508static void populate_lancer_stats(struct be_adapter *adapter)
509{
Selvin Xavier005d5692011-05-16 07:36:35 +0000510 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530511 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000512
513 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
514 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
515 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
516 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000517 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000518 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000519 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
520 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
521 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
522 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
523 drvs->rx_dropped_tcp_length =
524 pport_stats->rx_dropped_invalid_tcp_length;
525 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
526 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
527 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
528 drvs->rx_dropped_header_too_small =
529 pport_stats->rx_dropped_header_too_small;
530 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000531 drvs->rx_address_filtered =
532 pport_stats->rx_address_filtered +
533 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000534 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000535 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000536 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
537 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000538 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000539 drvs->forwarded_packets = pport_stats->num_forwards_lo;
540 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000541 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000542 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000543}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000544
Sathya Perla09c1c682011-08-22 19:41:53 +0000545static void accumulate_16bit_val(u32 *acc, u16 val)
546{
547#define lo(x) (x & 0xFFFF)
548#define hi(x) (x & 0xFFFF0000)
549 bool wrapped = val < lo(*acc);
550 u32 newacc = hi(*acc) + val;
551
552 if (wrapped)
553 newacc += 65536;
554 ACCESS_ONCE(*acc) = newacc;
555}
556
Jingoo Han4188e7d2013-08-05 18:02:02 +0900557static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530558 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559{
560 if (!BEx_chip(adapter))
561 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
562 else
563 /* below erx HW counter can actually wrap around after
564 * 65535. Driver accumulates a 32-bit value
565 */
566 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
567 (u16)erx_stat);
568}
569
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000570void be_parse_stats(struct be_adapter *adapter)
571{
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573 struct be_rx_obj *rxo;
574 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000575 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000576
Sathya Perlaca34fe32012-11-06 17:48:56 +0000577 if (lancer_chip(adapter)) {
578 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000579 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000580 if (BE2_chip(adapter))
581 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500582 else if (BE3_chip(adapter))
583 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000584 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500585 else
586 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000587
Ajit Khaparde61000862013-10-03 16:16:33 -0500588 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000589 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000590 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
591 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000592 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000593 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000594}
595
/* ndo_get_stats64() handler: aggregates the SW per-queue counters and the
 * HW-derived drv_stats snapshot into @stats.
 * Per-queue 64-bit counters are read inside a u64_stats fetch/retry loop
 * so the reads are torn-free on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666 struct net_device *netdev = adapter->netdev;
667
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000669 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000672
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530673 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000674 netif_carrier_on(netdev);
675 else
676 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200677
678 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500681static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700682{
Sathya Perla3c8def92011-06-12 20:01:58 +0000683 struct be_tx_stats *stats = tx_stats(txo);
684
Sathya Perlaab1594e2011-07-25 19:10:15 +0000685 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000686 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500687 stats->tx_bytes += skb->len;
688 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000689 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690}
691
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500692/* Returns number of WRBs needed for the skb */
693static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700694{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500695 /* +1 for the header wrb */
696 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697}
698
699static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
700{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500701 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
702 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
703 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
704 wrb->rsvd0 = 0;
705}
706
/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
 * to avoid the swap and shift/mask operations in wrb_fill().
 */
static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
{
	/* zero length/address: the all-zero WRB carries no buffer */
	wrb->frag_pa_hi = 0;
	wrb->frag_pa_lo = 0;
	wrb->frag_len = 0;
	wrb->rsvd0 = 0;
}
717
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000718static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530719 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000720{
721 u8 vlan_prio;
722 u16 vlan_tag;
723
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100724 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000725 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
726 /* If vlan priority provided by OS is NOT in available bmap */
727 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
728 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
729 adapter->recommended_prio;
730
731 return vlan_tag;
732}
733
Sathya Perlac9c47142014-03-27 10:46:19 +0530734/* Used only for IP tunnel packets */
735static u16 skb_inner_ip_proto(struct sk_buff *skb)
736{
737 return (inner_ip_hdr(skb)->version == 4) ?
738 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
739}
740
741static u16 skb_ip_proto(struct sk_buff *skb)
742{
743 return (ip_hdr(skb)->version == 4) ?
744 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
745}
746
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530747static inline bool be_is_txq_full(struct be_tx_obj *txo)
748{
749 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
750}
751
752static inline bool be_can_txq_wake(struct be_tx_obj *txo)
753{
754 return atomic_read(&txo->q.used) < txo->q.len / 2;
755}
756
757static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
758{
759 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
760}
761
/* Derive the TX WRB header attributes (LSO, csum-offload, vlan) for @skb
 * and record them in @wrb_params; wrb_fill_hdr() later encodes them into
 * the actual header WRB.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* the LSO6 flag is set only for non-Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* for tunneled pkts, csum flags refer to the inner headers */
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* CRC is requested for every pkt */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500793
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530794static void wrb_fill_hdr(struct be_adapter *adapter,
795 struct be_eth_hdr_wrb *hdr,
796 struct be_wrb_params *wrb_params,
797 struct sk_buff *skb)
798{
799 memset(hdr, 0, sizeof(*hdr));
800
801 SET_TX_WRB_HDR_BITS(crc, hdr,
802 BE_WRB_F_GET(wrb_params->features, CRC));
803 SET_TX_WRB_HDR_BITS(ipcs, hdr,
804 BE_WRB_F_GET(wrb_params->features, IPCS));
805 SET_TX_WRB_HDR_BITS(tcpcs, hdr,
806 BE_WRB_F_GET(wrb_params->features, TCPCS));
807 SET_TX_WRB_HDR_BITS(udpcs, hdr,
808 BE_WRB_F_GET(wrb_params->features, UDPCS));
809
810 SET_TX_WRB_HDR_BITS(lso, hdr,
811 BE_WRB_F_GET(wrb_params->features, LSO));
812 SET_TX_WRB_HDR_BITS(lso6, hdr,
813 BE_WRB_F_GET(wrb_params->features, LSO6));
814 SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
815
816 /* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
817 * hack is not needed, the evt bit is set while ringing DB.
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500818 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530819 SET_TX_WRB_HDR_BITS(event, hdr,
820 BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
821 SET_TX_WRB_HDR_BITS(vlan, hdr,
822 BE_WRB_F_GET(wrb_params->features, VLAN));
823 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
824
825 SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
826 SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
Venkata Duvvuru760c2952015-05-13 13:00:14 +0530827 SET_TX_WRB_HDR_BITS(mgmt, hdr,
828 BE_WRB_F_GET(wrb_params->features, OS2BMC));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829}
830
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000831static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530832 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000833{
834 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500835 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000836
Sathya Perla7101e112010-03-22 20:41:12 +0000837
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
839 (u64)le32_to_cpu(wrb->frag_pa_lo);
840 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000841 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500842 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000843 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500844 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000845 }
846}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700847
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530848/* Grab a WRB header for xmit */
849static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530851 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530853 queue_head_inc(&txo->q);
854 return head;
855}
856
/* Set up the WRB header for xmit: fill the header WRB reserved at @head,
 * convert it to LE, and record the request in the TXQ bookkeeping.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	/* HW reads the header WRB in little-endian */
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* remember the skb at the hdr index so TX compl can unmap/free it */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	/* pend_wrb_cnt counts WRBs queued but not yet notified to HW */
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530878/* Setup a WRB fragment (buffer descriptor) for xmit */
879static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
880 int len)
881{
882 struct be_eth_wrb *wrb;
883 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700884
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530885 wrb = queue_head_node(txq);
886 wrb_fill(wrb, busaddr, len);
887 queue_head_inc(txq);
888}
889
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind to the hdr WRB of the failed pkt to walk its frag WRBs */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		/* only the first (linear) frag may be a single mapping */
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* restore the producer index so these WRB slots are reused */
	txq->head = head;
}
917
918/* Enqueue the given packet for transmit. This routine allocates WRBs for the
919 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
920 * of WRBs used up by the packet.
921 */
922static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
923 struct sk_buff *skb,
924 struct be_wrb_params *wrb_params)
925{
926 u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
927 struct device *dev = &adapter->pdev->dev;
928 struct be_queue_info *txq = &txo->q;
929 bool map_single = false;
930 u16 head = txq->head;
931 dma_addr_t busaddr;
932 int len;
933
934 head = be_tx_get_wrb_hdr(txo);
935
936 if (skb->len > skb->data_len) {
937 len = skb_headlen(skb);
938
939 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
940 if (dma_mapping_error(dev, busaddr))
941 goto dma_err;
942 map_single = true;
943 be_tx_setup_wrb_frag(txo, busaddr, len);
944 copied += len;
945 }
946
947 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
948 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
949 len = skb_frag_size(frag);
950
951 busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
952 if (dma_mapping_error(dev, busaddr))
953 goto dma_err;
954 be_tx_setup_wrb_frag(txo, busaddr, len);
955 copied += len;
956 }
957
958 be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
959
960 be_tx_stats_update(txo, skb);
961 return wrb_cnt;
962
963dma_err:
964 adapter->drv_stats.dma_map_errors++;
965 be_xmit_restore(adapter, txo, head, map_single, copied);
Sathya Perla7101e112010-03-22 20:41:12 +0000966 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700967}
968
/* Non-zero once the f/w QnQ async event has been received (flag set
 * elsewhere in the driver when the event arrives).
 */
static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
{
	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
}
973
Somnath Kotur93040ae2012-06-26 22:32:10 +0000974static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000975 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530976 struct be_wrb_params
977 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000978{
979 u16 vlan_tag = 0;
980
981 skb = skb_share_check(skb, GFP_ATOMIC);
982 if (unlikely(!skb))
983 return skb;
984
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100985 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000986 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530987
988 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
989 if (!vlan_tag)
990 vlan_tag = adapter->pvid;
991 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
992 * skip VLAN insertion
993 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530994 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530995 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000996
997 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100998 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
999 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001000 if (unlikely(!skb))
1001 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001002 skb->vlan_tci = 0;
1003 }
1004
1005 /* Insert the outer VLAN, if any */
1006 if (adapter->qnq_vid) {
1007 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001008 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1009 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001010 if (unlikely(!skb))
1011 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301012 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001013 }
1014
Somnath Kotur93040ae2012-06-26 22:32:10 +00001015 return skb;
1016}
1017
/* Returns true for an ipv6 pkt whose first header after the ipv6 header
 * is neither TCP nor UDP and is an extension header with hdrlen == 0xff
 * ("offending" pkts; see the lockup workarounds in the callers).
 */
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
	struct ethhdr *eh = (struct ethhdr *)skb->data;
	u16 offset = ETH_HLEN;

	if (eh->h_proto == htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

		offset += sizeof(struct ipv6hdr);
		if (ip6h->nexthdr != NEXTHDR_TCP &&
		    ip6h->nexthdr != NEXTHDR_UDP) {
			struct ipv6_opt_hdr *ehdr =
				(struct ipv6_opt_hdr *)(skb->data + offset);

			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
			if (ehdr->hdrlen == 0xff)
				return true;
		}
	}
	return false;
}
1039
1040static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1041{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001042 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001043}
1044
Sathya Perla748b5392014-05-09 13:29:13 +05301045static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001046{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001047 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001048}
1049
/* TX-path workarounds for BEx/Lancer HW bugs: trims padded short ipv4
 * pkts, and falls back to SW vlan insertion where HW tagging corrupts
 * csums or can lock up the ASIC. Returns NULL when the pkt had to be
 * dropped or re-tagging failed (skb already freed/consumed).
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		/* drop the pad bytes; keep only the IP-declared length */
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1118
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301119static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1120 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301121 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122{
1123 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1124 * less may cause a transmit stall on that port. So the work-around is
1125 * to pad short packets (<= 32 bytes) to a 36-byte length.
1126 */
1127 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001128 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301129 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301130 }
1131
1132 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301133 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301134 if (!skb)
1135 return NULL;
1136 }
1137
1138 return skb;
1139}
1140
/* Notify HW (ring the TX doorbell) of all WRBs queued on @txo since the
 * last flush; pads with a dummy WRB when the pending count is odd on
 * non-Lancer chips.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* account the dummy wrb in the last request's num_wrb field */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1164
/* OS2BMC related */

/* Well-known UDP ports the BMC may want copies of */
#define DHCP_CLIENT_PORT 68
#define DHCP_SERVER_PORT 67
#define NET_BIOS_PORT1 137
#define NET_BIOS_PORT2 138
#define DHCPV6_RAS_PORT 547

/* Pkt classification predicates for be_send_pkt_to_bmc().
 * NOTE(review): the polarity differs -- mc/bc pkts are "allowed" when the
 * corresponding filter is NOT enabled, while ARP pkts are allowed when the
 * ARP filter IS enabled; this mirrors the bmc_filt_mask bit semantics --
 * confirm against the f/w spec.
 */
#define is_mc_allowed_on_bmc(adapter, eh) \
	(!is_multicast_filt_enabled(adapter) && \
	 is_multicast_ether_addr(eh->h_dest) && \
	 !is_broadcast_ether_addr(eh->h_dest))

#define is_bc_allowed_on_bmc(adapter, eh) \
	(!is_broadcast_filt_enabled(adapter) && \
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb) \
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* true when h_dest equals the netdev's broadcast addr
 * (compare_ether_addr() returns 0 on match)
 */
#define is_broadcast_packet(eh, adapter) \
	(is_multicast_ether_addr(eh->h_dest) && \
	 !compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb) (skb->protocol == htons(ETH_P_ARP))

/* Per-class tests of the BMC filter mask */
#define is_arp_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & \
	 BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter) \
	(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1218
1219static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1220 struct sk_buff **skb)
1221{
1222 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1223 bool os2bmc = false;
1224
1225 if (!be_is_os2bmc_enabled(adapter))
1226 goto done;
1227
1228 if (!is_multicast_ether_addr(eh->h_dest))
1229 goto done;
1230
1231 if (is_mc_allowed_on_bmc(adapter, eh) ||
1232 is_bc_allowed_on_bmc(adapter, eh) ||
1233 is_arp_allowed_on_bmc(adapter, (*skb))) {
1234 os2bmc = true;
1235 goto done;
1236 }
1237
1238 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1239 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1240 u8 nexthdr = hdr->nexthdr;
1241
1242 if (nexthdr == IPPROTO_ICMPV6) {
1243 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1244
1245 switch (icmp6->icmp6_type) {
1246 case NDISC_ROUTER_ADVERTISEMENT:
1247 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1248 goto done;
1249 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1250 os2bmc = is_ipv6_na_filt_enabled(adapter);
1251 goto done;
1252 default:
1253 break;
1254 }
1255 }
1256 }
1257
1258 if (is_udp_pkt((*skb))) {
1259 struct udphdr *udp = udp_hdr((*skb));
1260
1261 switch (udp->dest) {
1262 case DHCP_CLIENT_PORT:
1263 os2bmc = is_dhcp_client_filt_enabled(adapter);
1264 goto done;
1265 case DHCP_SERVER_PORT:
1266 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1267 goto done;
1268 case NET_BIOS_PORT1:
1269 case NET_BIOS_PORT2:
1270 os2bmc = is_nbios_filt_enabled(adapter);
1271 goto done;
1272 case DHCPV6_RAS_PORT:
1273 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1274 goto done;
1275 default:
1276 break;
1277 }
1278 }
1279done:
1280 /* For packets over a vlan, which are destined
1281 * to BMC, asic expects the vlan to be inline in the packet.
1282 */
1283 if (os2bmc)
1284 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1285
1286 return os2bmc;
1287}
1288
/* ndo_start_xmit() handler: applies chip workarounds, enqueues the pkt
 * (plus an optional OS2BMC copy with the mgmt bit set) on the mapped
 * TXQ, and rings the doorbell unless xmit_more defers it.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			/* extra ref for the 2nd enqueue of the same skb */
			skb_get(skb);
	}

	/* stop the subqueue while it cannot fit another max-frag skb */
	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1339
1340static int be_change_mtu(struct net_device *netdev, int new_mtu)
1341{
1342 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301343 struct device *dev = &adapter->pdev->dev;
1344
1345 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1346 dev_info(dev, "MTU must be between %d and %d bytes\n",
1347 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348 return -EINVAL;
1349 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301350
1351 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301352 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353 netdev->mtu = new_mtu;
1354 return 0;
1355}
1356
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001357static inline bool be_in_all_promisc(struct be_adapter *adapter)
1358{
1359 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1360 BE_IF_FLAGS_ALL_PROMISCUOUS;
1361}
1362
1363static int be_set_vlan_promisc(struct be_adapter *adapter)
1364{
1365 struct device *dev = &adapter->pdev->dev;
1366 int status;
1367
1368 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1369 return 0;
1370
1371 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1372 if (!status) {
1373 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1374 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1375 } else {
1376 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1377 }
1378 return status;
1379}
1380
1381static int be_clear_vlan_promisc(struct be_adapter *adapter)
1382{
1383 struct device *dev = &adapter->pdev->dev;
1384 int status;
1385
1386 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1387 if (!status) {
1388 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1389 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1390 }
1391 return status;
1392}
1393
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* Too many VLANs for the HW filter table: fall back to VLAN promisc */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filter programming succeeded; VLAN promisc is no longer
		 * needed, so try to turn it back off.
		 */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1429
Patrick McHardy80d5c362013-04-19 02:04:28 +00001430static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001431{
1432 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001433 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001435 /* Packets with VID 0 are always received by Lancer by default */
1436 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301437 return status;
1438
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301439 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301440 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001441
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301442 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301443 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001444
Somnath Kotura6b74e02014-01-21 15:50:55 +05301445 status = be_vid_config(adapter);
1446 if (status) {
1447 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301448 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301449 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301450
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001451 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452}
1453
Patrick McHardy80d5c362013-04-19 02:04:28 +00001454static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455{
1456 struct be_adapter *adapter = netdev_priv(netdev);
1457
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001458 /* Packets with VID 0 are always received by Lancer by default */
1459 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301460 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001461
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301462 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301463 adapter->vlans_added--;
1464
1465 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466}
1467
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001468static void be_clear_all_promisc(struct be_adapter *adapter)
Somnath kotur7ad09452014-03-03 14:24:43 +05301469{
Sathya Perlaac34b742015-02-06 08:18:40 -05001470 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001471 adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
1472}
1473
1474static void be_set_all_promisc(struct be_adapter *adapter)
1475{
1476 be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1477 adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1478}
1479
1480static void be_set_mc_promisc(struct be_adapter *adapter)
1481{
1482 int status;
1483
1484 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1485 return;
1486
1487 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1488 if (!status)
1489 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1490}
1491
1492static void be_set_mc_list(struct be_adapter *adapter)
1493{
1494 int status;
1495
1496 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1497 if (!status)
1498 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1499 else
1500 be_set_mc_promisc(adapter);
1501}
1502
/* Rebuild the HW unicast MAC filter list from the netdev's UC addresses.
 * pmac_id[] slot 0 holds the primary MAC; secondary UC MACs occupy slots
 * 1..uc_macs.  If the netdev has more UC addresses than the HW supports,
 * falls back to full promiscuous mode instead.
 */
static void be_set_uc_list(struct be_adapter *adapter)
{
	struct netdev_hw_addr *ha;
	int i = 1; /* First slot is claimed by the Primary MAC */

	/* Remove all previously programmed secondary UC MACs; note the loop
	 * counts uc_macs itself down to zero while advancing the slot index.
	 */
	for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);

	if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Re-add each UC address; uc_macs is pre-incremented so slot 0
	 * (primary MAC) is never overwritten.
	 */
	netdev_for_each_uc_addr(ha, adapter->netdev) {
		adapter->uc_macs++; /* First slot is for Primary MAC */
		be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
				&adapter->pmac_id[adapter->uc_macs], 0);
	}
}
1523
1524static void be_clear_uc_list(struct be_adapter *adapter)
1525{
1526 int i;
1527
1528 for (i = 1; i < (adapter->uc_macs + 1); i++)
1529 be_cmd_pmac_del(adapter, adapter->if_handle,
1530 adapter->pmac_id[i], 0);
1531 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301532}
1533
/* ndo_set_rx_mode handler: reconcile the HW RX filters (promisc, VLAN,
 * unicast and multicast lists) with the netdev's current flags and
 * address lists.  The ordering below matters: promiscuous mode is
 * cleared before VLAN filters are reprogrammed, and the uc/mc lists are
 * only touched when not in a promiscuous mode.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* re-program VLAN filters that promisc mode had bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Rebuild the UC list only when its size changed */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1562
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * On BEx chips the old pmac entry is deleted and a new one added; on
 * later chips a single set_mac command is used.  The cached per-VF MAC
 * is updated only on success.  Returns 0, -EPERM, -EINVAL, or a
 * translated FW error.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace the pmac entry (delete old, then add new) */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1602
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001603static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301604 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001605{
1606 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001607 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001608
Sathya Perla11ac75e2011-12-13 00:58:50 +00001609 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001610 return -EPERM;
1611
Sathya Perla11ac75e2011-12-13 00:58:50 +00001612 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001613 return -EINVAL;
1614
1615 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001616 vi->max_tx_rate = vf_cfg->tx_rate;
1617 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001618 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1619 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001620 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301621 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001622 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001623
1624 return 0;
1625}
1626
/* Enable Transparent VLAN Tagging (TVT) with tag @vlan on VF @vf.
 * Once TVT is on, any VLAN filters the VF had programmed are cleared and
 * the VF's FILTMGMT privilege is revoked so it cannot program new ones.
 * Returns 0 or a FW status from the initial hsw_config command; failures
 * of the follow-up cleanup steps are not treated as fatal.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	return 0;
}
1655
/* Disable Transparent VLAN Tagging on VF @vf and restore the VF's
 * FILTMGMT privilege so it can program its own VLAN filters again.
 * Returns 0 or the FW status of the hsw_config reset; a failure to
 * restore the privilege is logged implicitly by not setting the flag.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1682
Sathya Perla748b5392014-05-09 13:29:13 +05301683static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001684{
1685 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001686 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001687 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001688
Sathya Perla11ac75e2011-12-13 00:58:50 +00001689 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001690 return -EPERM;
1691
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001692 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001693 return -EINVAL;
1694
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001695 if (vlan || qos) {
1696 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001697 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001698 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001699 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001700 }
1701
Kalesh APabccf232014-07-17 16:20:24 +05301702 if (status) {
1703 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001704 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1705 status);
Kalesh APabccf232014-07-17 16:20:24 +05301706 return be_cmd_status(status);
1707 }
1708
1709 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301710 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001711}
1712
/* ndo_set_vf_rate handler: set the max TX rate (Mbps) for VF @vf.
 * min_tx_rate is not supported and must be 0.  A max_tx_rate of 0 means
 * "no limit" and skips the validation against link speed.  On Skyhawk
 * the rate must be a whole percentage of the link speed.  Returns 0,
 * -EPERM, -EINVAL, -ENETDOWN, or a translated FW error.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* min rate limiting is not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* 0 == unlimited: skip the link-speed validation below */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301774
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301775static int be_set_vf_link_state(struct net_device *netdev, int vf,
1776 int link_state)
1777{
1778 struct be_adapter *adapter = netdev_priv(netdev);
1779 int status;
1780
1781 if (!sriov_enabled(adapter))
1782 return -EPERM;
1783
1784 if (vf >= adapter->num_vfs)
1785 return -EINVAL;
1786
1787 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301788 if (status) {
1789 dev_err(&adapter->pdev->dev,
1790 "Link state change on VF %d failed: %#x\n", vf, status);
1791 return be_cmd_status(status);
1792 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301793
Kalesh APabccf232014-07-17 16:20:24 +05301794 adapter->vf_cfg[vf].plink_tracking = link_state;
1795
1796 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301797}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001798
Kalesh APe7bcbd72015-05-06 05:30:32 -04001799static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1800{
1801 struct be_adapter *adapter = netdev_priv(netdev);
1802 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1803 u8 spoofchk;
1804 int status;
1805
1806 if (!sriov_enabled(adapter))
1807 return -EPERM;
1808
1809 if (vf >= adapter->num_vfs)
1810 return -EINVAL;
1811
1812 if (BEx_chip(adapter))
1813 return -EOPNOTSUPP;
1814
1815 if (enable == vf_cfg->spoofchk)
1816 return 0;
1817
1818 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1819
1820 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1821 0, spoofchk);
1822 if (status) {
1823 dev_err(&adapter->pdev->dev,
1824 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1825 return be_cmd_status(status);
1826 }
1827
1828 vf_cfg->spoofchk = enable;
1829 return 0;
1830}
1831
Sathya Perla2632baf2013-10-01 16:00:00 +05301832static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1833 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001834{
Sathya Perla2632baf2013-10-01 16:00:00 +05301835 aic->rx_pkts_prev = rx_pkts;
1836 aic->tx_reqs_prev = tx_pkts;
1837 aic->jiffies = now;
1838}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001839
/* Compute a new event-queue delay (EQD) for @eqo based on the aggregate
 * RX+TX packet rate since the last sample.  Returns the configured
 * static delay when adaptive coalescing is disabled, the previous EQD
 * when the sample is unusable (first call, jiffies/counter wrap, or a
 * sub-millisecond interval), and otherwise a rate-derived value clamped
 * to [min_eqd, max_eqd].
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* adaptive mode off: use the ethtool-configured delay */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum RX packet counts over all RX queues on this EQ; the
	 * u64_stats begin/retry loop gives a consistent 64-bit snapshot.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	/* Likewise for TX request counts on this EQ's TX queues */
	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	if (delta == 0)
		return aic->prev_eqd;

	/* packets per second across both directions */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1900
1901/* For Skyhawk-R only */
1902static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1903{
1904 struct be_adapter *adapter = eqo->adapter;
1905 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1906 ulong now = jiffies;
1907 int eqd;
1908 u32 mult_enc;
1909
1910 if (!aic->enable)
1911 return 0;
1912
1913 if (time_before_eq(now, aic->jiffies) ||
1914 jiffies_to_msecs(now - aic->jiffies) < 1)
1915 eqd = aic->prev_eqd;
1916 else
1917 eqd = be_get_new_eqd(eqo);
1918
1919 if (eqd > 100)
1920 mult_enc = R2I_DLY_ENC_1;
1921 else if (eqd > 60)
1922 mult_enc = R2I_DLY_ENC_2;
1923 else if (eqd > 20)
1924 mult_enc = R2I_DLY_ENC_3;
1925 else
1926 mult_enc = R2I_DLY_ENC_0;
1927
1928 aic->prev_eqd = eqd;
1929
1930 return mult_enc;
1931}
1932
1933void be_eqd_update(struct be_adapter *adapter, bool force_update)
1934{
1935 struct be_set_eqd set_eqd[MAX_EVT_QS];
1936 struct be_aic_obj *aic;
1937 struct be_eq_obj *eqo;
1938 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001939
Sathya Perla2632baf2013-10-01 16:00:00 +05301940 for_all_evt_queues(adapter, eqo, i) {
1941 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001942 eqd = be_get_new_eqd(eqo);
1943 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05301944 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1945 set_eqd[num].eq_id = eqo->q.id;
1946 aic->prev_eqd = eqd;
1947 num++;
1948 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001949 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301950
1951 if (num)
1952 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001953}
1954
Sathya Perla3abcded2010-10-03 22:12:27 -07001955static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301956 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001957{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001958 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001959
Sathya Perlaab1594e2011-07-25 19:10:15 +00001960 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001961 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001962 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001963 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001964 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001965 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001966 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001967 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001968 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969}
1970
Sathya Perla2e588f82011-03-11 02:49:26 +00001971static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001972{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001973 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301974 * Also ignore ipcksm for ipv6 pkts
1975 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001976 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301977 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001978}
1979
/* Pop the page-info entry at the RX queue tail and make its data CPU
 * visible.  For the last fragment of a (big) page the whole page is DMA
 * unmapped; intermediate fragments only get a dma_sync for their slice.
 * Advances the queue tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* last user of this page: unmap the full mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* page still shared with later frags: sync just this one */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2005
2006/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002007static void be_rx_compl_discard(struct be_rx_obj *rxo,
2008 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002009{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002011 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002012
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002013 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302014 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002015 put_page(page_info->page);
2016 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017 }
2018}
2019
2020/*
2021 * skb_fill_rx_data forms a complete skb for an ether frame
2022 * indicated by rxcp.
2023 */
/* Fill @skb with the frame described by the RX completion @rxcp.
 * Buffers for the frame were posted earlier into rxo->page_info_tbl;
 * they are pulled back in order via get_rx_page_info().  Tiny frames
 * (<= BE_HDR_LEN) are copied entirely into the skb linear area; larger
 * frames get the Ethernet header copied and the payload attached as
 * page fragments.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data; the rx buffer
		 * page is no longer referenced by the skb.
		 */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area; the
		 * rest of the first fragment stays in the page as frag[0].
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to skb (or released) */

	/* Single-fragment frame: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref taken
			 * when the buffer was posted; frag[j] already holds one.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2094
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the posted rx buffers and hands it to
 * the stack via netif_receive_skb().  On skb allocation failure the
 * completion's buffers are discarded and a drop counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only if RXCSUM offload is on and the
	 * parsed completion says the csum passed.
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2130
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Builds a frag-only skb obtained from napi_get_frags() out of the
 * posted rx buffers and feeds it to napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j starts at -1 (u16 wrap) so the first iteration's j++ makes it 0 */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as previous frag: frag[j] already holds a
			 * reference, so drop the posting-time reference.
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is taken only for csum-verified packets */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2188
/* Decode a v1-format RX completion entry (BE3-native mode) into the
 * driver's generic be_rx_compl_info representation.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002211
/* Decode a v0-format RX completion entry (non BE3-native mode) into the
 * driver's generic be_rx_compl_info representation.  Unlike v1, this
 * format carries an ip_frag bit and no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2233
/* Fetch the next valid RX completion from the RX CQ, or NULL if none.
 * Parses the raw entry into rxo->rxcp (v0 or v1 layout depending on
 * be3_native), applies vlan fix-ups, invalidates the consumed entry and
 * advances the CQ tail.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* For IP fragments the L4 csum reported by HW is not usable */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the vlan if it is the PVID and not configured on
		 * this interface.
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2278
Eric Dumazet1829b082011-03-01 05:48:12 +00002279static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002280{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002282
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002283 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002284 gfp |= __GFP_COMP;
2285 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286}
2287
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE.  Posts at most @frags_needed fragments (or fewer
 * on allocation/mapping failure or when the RXQ slot already holds a
 * page), then rings the RXQ doorbell in chunks.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Need a fresh big page: allocate and DMA-map it once;
			 * it will be carved into rx_frag_size chunks below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag from the same big page: take an extra
			 * page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks of at most
		 * MAX_NUM_POST_ERX_DB frags per notification.
		 */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2370
/* Fetch the next valid TX completion from the TX CQ, or NULL if none.
 * Decodes status and the last-wrb index into txo->txcp, invalidates the
 * consumed entry and advances the CQ tail.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Invalidate the entry so it is not processed again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2391
/* Walk the TXQ from its tail up to and including @last_index, unmapping
 * each wrb and freeing the skbs that were transmitted.  A non-NULL entry
 * in sent_skb_list marks the start of a request (its header wrb).
 * Returns the number of wrbs consumed so the caller can credit the TXQ.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		/* The first frag wrb after the header also unmaps the
		 * skb's linear area (when it has one).
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb of the final request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2425
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002426/* Return the number of events in the event queue */
2427static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00002428{
2429 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002430 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00002431
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002432 do {
2433 eqe = queue_tail_node(&eqo->q);
2434 if (eqe->evt == 0)
2435 break;
2436
2437 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002438 eqe->evt = 0;
2439 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002440 queue_tail_inc(&eqo->q);
2441 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002442
2443 return num;
2444}
2445
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002446/* Leaves the EQ is disarmed state */
2447static void be_eq_clean(struct be_eq_obj *eqo)
2448{
2449 int num = events_get(eqo);
2450
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002452}
2453
Kalesh AP99b44302015-08-05 03:27:49 -04002454/* Free posted rx buffers that were not used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
2456{
2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
/* Drain the RX CQ during queue teardown and leave it unarmed.
 * Discards all pending completions and, on non-Lancer chips, waits for
 * the HW flush completion (num_rcvd == 0) to arrive.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or when the HW is in error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2509
/* Drain TX completions on all TX queues during teardown.
 * First polls completions until all queues are done or the HW has been
 * silent for ~10ms (or is in error), then frees any wrbs that were
 * enqueued but never notified to the HW and resets those TXQ indices.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart the silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2575
/* Tear down all event queues: drain events, destroy the HW queue,
 * unregister NAPI and release per-EQ resources.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Consume leftover events before destroying the EQ */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
			free_cpumask_var(eqo->affinity_mask);
		}
		be_queue_free(adapter, &eqo->q);
	}
}
2592
/* Create the event queues: allocate queue memory, create the EQ in HW,
 * set up adaptive interrupt coalescing defaults, CPU affinity hints and
 * NAPI for each EQ.  Returns 0 on success or a negative errno; partially
 * created queues are left for the destroy path to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* Cannot use more EQs than available IRQs or the configured limit */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);

		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;

		/* Spread EQ affinity hints across the local NUMA node */
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
	}
	return 0;
}
2632
Sathya Perla5fb379e2009-06-18 00:02:59 +00002633static void be_mcc_queues_destroy(struct be_adapter *adapter)
2634{
2635 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002636
Sathya Perla8788fdc2009-07-27 22:52:03 +00002637 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002638 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002639 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002640 be_queue_free(adapter, q);
2641
Sathya Perla8788fdc2009-07-27 22:52:03 +00002642 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002643 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002644 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002645 be_queue_free(adapter, q);
2646}
2647
/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Allocate and create the MCC completion queue first; the MCC
	 * queue itself is bound to it below.
	 */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* goto-unwind ladder: each label undoes exactly the steps that
	 * succeeded before the failure point, in reverse order.
	 */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2680
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002681static void be_tx_queues_destroy(struct be_adapter *adapter)
2682{
2683 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002684 struct be_tx_obj *txo;
2685 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002686
Sathya Perla3c8def92011-06-12 20:01:58 +00002687 for_all_tx_queues(adapter, txo, i) {
2688 q = &txo->q;
2689 if (q->created)
2690 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2691 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002692
Sathya Perla3c8def92011-06-12 20:01:58 +00002693 q = &txo->cq;
2694 if (q->created)
2695 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2696 be_queue_free(adapter, q);
2697 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002698}
2699
/* Create all TX completion queues and work queues, bind each TX queue's
 * CQ to an event queue (round-robin when there are fewer EQs than TXQs),
 * and set the XPS affinity of each netdev TX queue to its EQ's CPU mask.
 * Returns 0 on success or the first failing command's status.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		/* The CQ must exist before the TXQ that posts to it */
		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2744
2745static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002746{
2747 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002748 struct be_rx_obj *rxo;
2749 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002750
Sathya Perla3abcded2010-10-03 22:12:27 -07002751 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002752 q = &rxo->cq;
2753 if (q->created)
2754 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2755 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002756 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002757}
2758
/* Decide how many RX queues (RSS rings plus an optional default RXQ) to
 * use and create a completion queue for each, spreading the CQs over the
 * available event queues round-robin. The RXQs themselves are created
 * later. Returns 0 on success or the first failing command's status.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs share EQs round-robin when there are fewer EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2800
/* Legacy INTx interrupt handler (shared line); only the first EQ is used
 * in INTx mode. Schedules NAPI and acknowledges the events counted here.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2832
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002833static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002834{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002835 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002836
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04002837 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
Sathya Perla0b545a62012-11-23 00:27:18 +00002838 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002839 return IRQ_HANDLED;
2840}
2841
Sathya Perla2e588f82011-03-11 02:49:26 +00002842static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002843{
Somnath Koture38b1702013-05-29 22:55:56 +00002844 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002845}
2846
/* NAPI/busy-poll RX worker: drain up to @budget completions from the RX
 * CQ, deliver packets (via GRO when eligible and not busy-polling),
 * notify the CQ for the work done, and replenish RX buffers unless the
 * ring is post-starved. Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* Stats are updated even for discarded completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2906
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302907static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302908{
2909 switch (status) {
2910 case BE_TX_COMP_HDR_PARSE_ERR:
2911 tx_stats(txo)->tx_hdr_parse_err++;
2912 break;
2913 case BE_TX_COMP_NDMA_ERR:
2914 tx_stats(txo)->tx_dma_err++;
2915 break;
2916 case BE_TX_COMP_ACL_ERR:
2917 tx_stats(txo)->tx_spoof_check_err++;
2918 break;
2919 }
2920}
2921
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302922static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302923{
2924 switch (status) {
2925 case LANCER_TX_COMP_LSO_ERR:
2926 tx_stats(txo)->tx_tso_err++;
2927 break;
2928 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2929 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2930 tx_stats(txo)->tx_spoof_check_err++;
2931 break;
2932 case LANCER_TX_COMP_QINQ_ERR:
2933 tx_stats(txo)->tx_qinq_err++;
2934 break;
2935 case LANCER_TX_COMP_PARITY_ERR:
2936 tx_stats(txo)->tx_internal_parity_err++;
2937 break;
2938 case LANCER_TX_COMP_DMA_ERR:
2939 tx_stats(txo)->tx_dma_err++;
2940 break;
2941 }
2942}
2943
/* Reap TX completions for one TX object: free the completed WRBs, record
 * per-chip error stats, notify the CQ, and wake the netdev subqueue @idx
 * if it was stopped for lack of WRB space.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		if (txcp->status) {
			/* Error-status encoding differs per chip family */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002978
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Busy-poll arbitration helpers: NAPI and busy-poll contend for an EQ via
 * eqo->lock and the eqo->state flags (BE_EQ_IDLE / BE_EQ_NAPI /
 * BE_EQ_POLL plus the *_YIELD markers). Only one side may process the
 * EQ's queues at a time. When CONFIG_NET_RX_BUSY_POLL is off, the stubs
 * below make NAPI always win and busy-poll never run.
 */

/* Try to claim the EQ for NAPI; called with BH already disabled.
 * Returns false (and records a yield) if busy-poll holds the lock.
 */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release the EQ after NAPI processing; state returns to IDLE. */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to claim the EQ for busy-poll; returns false if NAPI holds it. */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release the EQ after busy-poll processing; state returns to IDLE. */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ busy-poll lock and state. */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until busy-poll has drained off this EQ (spins via the NAPI
 * lock), so the EQ can be safely torn down.
 */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queueus.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

/* Without busy-poll support NAPI always gets the EQ... */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

/* ...and busy-poll never does. */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3078
/* NAPI poll handler for one event queue: reaps TX completions, processes
 * RX (unless busy-poll holds the EQ), handles MCC completions on the MCC
 * EQ, and rearms the EQ only when the budget was not exhausted. Returns
 * the RX work done (or the full budget to stay in polling mode).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* Busy-poll owns the EQ: claim the full budget so NAPI
		 * polls again instead of rearming the interrupt.
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		/* Rearm the EQ (arm=true) and ack the counted events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3127
#ifdef CONFIG_NET_RX_BUSY_POLL
/* ndo_busy_poll handler: process a small batch (up to 4 completions) of
 * RX work on the EQ's queues. Returns LL_FLUSH_BUSY when NAPI owns the
 * EQ, otherwise the amount of work done on the first queue with traffic.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		/* Stop at the first queue that yields any packets */
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3149
/* Check the adapter's error registers and latch/log any error found.
 * Lancer chips report errors via the SLIPORT status registers; other
 * chips expose UE (unrecoverable error) bitmaps in PCI config space.
 * No-op if a HW error has already been recorded.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if it's a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* Ignore UE bits that are masked off */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			/* Log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3218
Sathya Perla8d56ff12009-11-22 22:02:26 +00003219static void be_msix_disable(struct be_adapter *adapter)
3220{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003221 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003222 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003223 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303224 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003225 }
3226}
3227
/* Enable MSI-x, requesting enough vectors for the NIC queues (and, when
 * RoCE is supported, an equal share for RoCE). Splits whatever vector
 * count the PCI core grants between NIC and RoCE. On failure, returns
 * the error for VFs (which cannot fall back to INTx) and 0 for PFs.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* The PCI core may grant fewer vectors than requested, but never
	 * fewer than MIN_MSIX_VECTORS.
	 */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3271
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003272static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303273 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003274{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05303275 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003276}
3277
/* Request an MSI-x IRQ for every event queue and apply its CPU affinity
 * hint. On failure, unwinds the IRQs already requested, logs, and
 * disables MSI-x before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* Per-EQ IRQ name, e.g. "eth0-q0" */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* Free the IRQs of all EQs registered before the failing one */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3303
/* Register the adapter's interrupt handler(s): MSI-x when enabled, with
 * a fallback to a shared INTx line for PFs (VFs must use MSI-x). Sets
 * isr_registered on success.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3331
/* Undo be_irq_register(): free either the shared INTx IRQ or every
 * per-EQ MSI-x IRQ (clearing its affinity hint first), then clear
 * isr_registered. No-op if no IRQs were registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3357
/* Destroy every RX queue that was created in be_rx_qs_create() and free
 * its descriptor ring memory. The Lancer branch below is a hardware
 * workaround and its ordering must be preserved.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* If RXQs are destroyed while in an "out of buffer"
			 * state, there is a possibility of an HW stall on
			 * Lancer. So, post 64 buffers to each queue to relieve
			 * the "out of buffer" condition.
			 * Make sure there's space in the RXQ before posting.
			 */
			if (lancer_chip(adapter)) {
				be_rx_cq_clean(rxo);
				if (atomic_read(&q->used) == 0)
					be_post_rx_frags(rxo, GFP_KERNEL,
							 MAX_RX_POST);
			}

			be_cmd_rxq_destroy(adapter, q);
			/* drain completions, then reclaim posted buffers */
			be_rx_cq_clean(rxo);
			be_rxq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
3387
/* Tear down the RX filters programmed in be_enable_if_filters():
 * delete the primary MAC, flush the UC list and (Lancer only, see below)
 * clear the IFACE filter flags in FW.
 */
static void be_disable_if_filters(struct be_adapter *adapter)
{
	be_cmd_pmac_del(adapter, adapter->if_handle,
			adapter->pmac_id[0], 0);

	be_clear_uc_list(adapter);

	/* The IFACE flags are enabled in the open path and cleared
	 * in the close path. When a VF gets detached from the host and
	 * assigned to a VM the following happens:
	 * - VF's IFACE flags get cleared in the detach path
	 * - IFACE create is issued by the VF in the attach path
	 * Due to a bug in the BE3/Skyhawk-R FW
	 * (Lancer FW doesn't have the bug), the IFACE capability flags
	 * specified along with the IFACE create cmd issued by a VF are not
	 * honoured by FW. As a consequence, if a *new* driver
	 * (that enables/disables IFACE flags in open/close)
	 * is loaded in the host and an *old* driver is * used by a VM/VF,
	 * the IFACE gets created *without* the needed flags.
	 * To avoid this, disable RX-filter flags only for Lancer.
	 */
	if (lancer_chip(adapter)) {
		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
	}
}
3414
/* ndo_stop handler: quiesce the interface in the reverse order of
 * be_open() — filters, RoCE, NAPI, MCC, TX drain, RX queues, EQs, IRQs.
 * The statement ordering here is a shutdown sequence; do not reorder.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_disable_if_filters(adapter);

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Make sure no in-flight interrupt handler still references each EQ
	 * before cleaning it.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
3461
/* Allocate and create all RX queues, program the RSS indirection table
 * and hash key in FW, and post initial RX buffers.
 * Returns 0 on success or the first failing command's status; partially
 * created queues are cleaned up by the caller via be_close()/
 * be_rx_qs_destroy().
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* Create a default (non-RSS) RXQ when required, or when no RSS
	 * queues exist at all.
	 */
	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
		rxo = default_rxo(adapter);
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       false, &rxo->rss_id);
		if (rc)
			return rc;
	}

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queues until all RSS_INDIR_TABLE_LEN slots are used.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on chips newer than BEx */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	/* NOTE(review): 128 looks like RSS_INDIR_TABLE_LEN — confirm and
	 * consider using the named constant.
	 */
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* Post 1 less than RXQ-len to avoid head being equal to tail,
	 * which is a queue empty condition
	 */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);

	return 0;
}
3531
/* Program the interface's RX filters on open: enable the IFACE filter
 * flags, add the primary MAC (unless the PF owns it for a BEx VF),
 * re-apply configured VLANs and the current RX mode.
 * Returns 0 on success or the failing FW command's status.
 */
static int be_enable_if_filters(struct be_adapter *adapter)
{
	int status;

	status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
	if (status)
		return status;

	/* For BE3 VFs, the PF programs the initial MAC address */
	if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
		status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
					 adapter->if_handle,
					 &adapter->pmac_id[0], 0);
		if (status)
			return status;
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	return 0;
}
3556
/* ndo_open handler: create RX queues, program filters, register IRQs,
 * arm all CQs/EQs, enable NAPI and start the TX queues. On any failure
 * the partially initialized state is torn down via be_close() and -EIO
 * is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_enable_if_filters(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	/* Arm completion queues so the HW starts generating events */
	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	/* Best-effort: report the current link state if the query works */
	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* Only Skyhawk supports VxLAN offloads; learn existing ports */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
3610
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003611static int be_setup_wol(struct be_adapter *adapter, bool enable)
3612{
3613 struct be_dma_mem cmd;
3614 int status = 0;
3615 u8 mac[ETH_ALEN];
3616
Joe Perchesc7bf7162015-03-02 19:54:47 -08003617 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003618
3619 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07003620 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3621 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303622 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303623 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003624
3625 if (enable) {
3626 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303627 PCICFG_PM_CONTROL_OFFSET,
3628 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003629 if (status) {
3630 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003631 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003632 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3633 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003634 return status;
3635 }
3636 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303637 adapter->netdev->dev_addr,
3638 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003639 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3640 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3641 } else {
3642 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3643 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3644 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3645 }
3646
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003647 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003648 return status;
3649}
3650
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003651static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3652{
3653 u32 addr;
3654
3655 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3656
3657 mac[5] = (u8)(addr & 0xFF);
3658 mac[4] = (u8)((addr >> 8) & 0xFF);
3659 mac[3] = (u8)((addr >> 16) & 0xFF);
3660 /* Use the OUI from the current MAC address */
3661 memcpy(mac, adapter->netdev->dev_addr, 3);
3662}
3663
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 * Returns the status of the last FW command issued (0 if all succeeded);
 * a per-VF failure is logged but does not stop the loop.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx programs a PMAC entry; newer chips set the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets seed + 1 (low byte increment, wraps at 0xFF) */
		mac[5] += 1;
	}
	return status;
}
3699
/* Query the currently-active MAC of every VF from FW and cache it in
 * vf_cfg->mac_addr. Used when VFs already exist (driver reload path).
 * Returns 0 or the first failing command's status.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3716
/* Tear down SR-IOV state: disable SR-IOV and destroy each VF's MAC and
 * IFACE in FW. If any VF is still assigned to a VM, the FW objects are
 * left untouched (only the host-side bookkeeping is freed) to avoid
 * pulling resources out from under a running guest.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx used a PMAC entry; newer chips clear the MAC directly */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3745
/* Destroy all adapter queues. Order matters: MCC first, event queues
 * last, since the other queues post completions to the EQs.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3753
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303754static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003755{
Sathya Perla191eb752012-02-23 18:50:13 +00003756 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3757 cancel_delayed_work_sync(&adapter->work);
3758 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3759 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303760}
3761
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003762static void be_cancel_err_detection(struct be_adapter *adapter)
3763{
3764 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3765 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3766 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3767 }
3768}
3769
#ifdef CONFIG_BE2NET_VXLAN
/* Revert all VxLAN offload state: tell FW to treat the tunnel iface as
 * a normal one, clear the programmed VxLAN UDP port, and strip the
 * tunnel-offload feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303790
Vasundhara Volamf2858732015-03-04 00:44:33 -05003791static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3792{
3793 struct be_resources res = adapter->pool_res;
3794 u16 num_vf_qs = 1;
3795
3796 /* Distribute the queue resources equally among the PF and it's VFs
3797 * Do not distribute queue resources in multi-channel configuration.
3798 */
3799 if (num_vfs && !be_is_mc(adapter)) {
3800 /* If number of VFs requested is 8 less than max supported,
3801 * assign 8 queue pairs to the PF and divide the remaining
3802 * resources evenly among the VFs
3803 */
3804 if (num_vfs < (be_max_vfs(adapter) - 8))
3805 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3806 else
3807 num_vf_qs = res.max_rss_qs / num_vfs;
3808
3809 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3810 * interfaces per port. Provide RSS on VFs, only if number
3811 * of VFs requested is less than MAX_RSS_IFACES limit.
3812 */
3813 if (num_vfs >= MAX_RSS_IFACES)
3814 num_vf_qs = 1;
3815 }
3816 return num_vf_qs;
3817}
3818
/* Undo be_setup(): stop the worker, clear VFs, rebalance SR-IOV queue
 * resources in FW (Skyhawk PF only, and only when no VFs are assigned),
 * drop VxLAN offloads, destroy the IFACE and all queues, and disable
 * MSI-X. Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u16 num_vf_qs;

	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
	    !pci_vfs_assigned(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter,
					       pci_sriov_get_totalvfs(pdev));
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(pdev),
					num_vf_qs);
	}

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	kfree(adapter->pmac_id);
	adapter->pmac_id = NULL;

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3855
/* Create one IFACE in FW for every VF. Capability flags come from the
 * per-VF FW profile when available (non-BE3), otherwise from a fixed
 * default set. Returns 0 or the first failing command's status;
 * partially created IFACEs are cleaned up by the caller via
 * be_vf_clear().
 */
static int be_vfs_if_create(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	u32 cap_flags, en_flags, vf;
	struct be_vf_cfg *vf_cfg;
	int status;

	/* If a FW profile exists, then cap_flags are updated */
	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;

	for_all_vfs(adapter, vf_cfg, vf) {
		if (!BE3_chip(adapter)) {
			status = be_cmd_get_profile_config(adapter, &res,
							   RESOURCE_LIMITS,
							   vf + 1);
			if (!status) {
				cap_flags = res.if_cap_flags;
				/* Prevent VFs from enabling VLAN promiscuous
				 * mode
				 */
				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
			}
		}

		/* Enable only the basic RX filter flags at create time */
		en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
					BE_IF_FLAGS_BROADCAST |
					BE_IF_FLAGS_MULTICAST |
					BE_IF_FLAGS_PASS_L3L4_ERRORS);
		status = be_cmd_if_create(adapter, cap_flags, en_flags,
					  &vf_cfg->if_handle, vf + 1);
		if (status)
			return status;
	}

	return 0;
}
3893
/* Allocate the per-VF config array and mark every entry's IFACE/PMAC
 * handles as invalid (-1). Returns 0 or -ENOMEM.
 */
static int be_vf_setup_init(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	int vf;

	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
				  GFP_KERNEL);
	if (!adapter->vf_cfg)
		return -ENOMEM;

	for_all_vfs(adapter, vf_cfg, vf) {
		vf_cfg->if_handle = -1;
		vf_cfg->pmac_id = -1;
	}
	return 0;
}
3910
/* Full SR-IOV VF bring-up. If VFs already exist (old_vfs != 0, e.g. a
 * driver reload with VFs still enabled) their IFACE ids and MACs are
 * queried from FW; otherwise IFACEs are created, MACs assigned and
 * pci_enable_sriov() is called at the end. In both cases each VF is
 * granted FILTMGMT privilege when possible and its spoof-check state
 * is cached. On any failure everything is rolled back via
 * be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	bool spoofchk;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
						  vf + 1);
		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  vf_cfg->privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status) {
				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
			}
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
					       vf_cfg->if_handle, NULL,
					       &spoofchk);
		if (!status)
			vf_cfg->spoofchk = spoofchk;

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3994
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303995/* Converting function_mode bits on BE3 to SH mc_type enums */
3996
3997static u8 be_convert_mc_type(u32 function_mode)
3998{
Suresh Reddy66064db2014-06-23 16:41:29 +05303999 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304000 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05304001 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304002 return FLEX10;
4003 else if (function_mode & VNIC_MODE)
4004 return vNIC2;
4005 else if (function_mode & UMC_ENABLED)
4006 return UMC;
4007 else
4008 return MC_NONE;
4009}
4010
/* On BE2/BE3 FW does not suggest the supported limits; fill in *res with
 * driver-computed defaults based on chip type, multi-channel mode and
 * SRIOV state.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs get a larger unicast MAC filter budget than VFs */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* one extra RX queue in addition to the RSS queues */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* DEFQ_RSS is not supported on BE2/BE3 */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4081
Sathya Perla30128032011-11-10 19:17:57 +00004082static void be_setup_init(struct be_adapter *adapter)
4083{
4084 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004085 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004086 adapter->if_handle = -1;
4087 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004088 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004089 if (be_physfn(adapter))
4090 adapter->cmd_privileges = MAX_PRIVILEGES;
4091 else
4092 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004093}
4094
/* Query the PF-pool SRIOV resource limits from FW and stash them in
 * adapter->pool_res; also accounts for VFs left enabled by a previous
 * driver load. Always returns 0.
 */
static int be_get_sriov_config(struct be_adapter *adapter)
{
	struct be_resources res = {0};
	int max_vfs, old_vfs;

	be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);

	/* Some old versions of BE3 FW don't report max_vfs value */
	if (BE3_chip(adapter) && !res.max_vfs) {
		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
	}

	adapter->pool_res = res;

	/* If during previous unload of the driver, the VFs were not disabled,
	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
	 * Instead use the TotalVFs value stored in the pci-dev struct.
	 */
	old_vfs = pci_num_vf(adapter->pdev);
	if (old_vfs) {
		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
			 old_vfs);

		adapter->pool_res.max_vfs =
			pci_sriov_get_totalvfs(adapter->pdev);
		adapter->num_vfs = old_vfs;
	}

	return 0;
}
4126
/* Fetch the SRIOV limits and, on Skyhawk with no pre-existing VFs,
 * redistribute the PF-pool queue resources up-front via
 * SET_SRIOV_CONFIG. Failure to optimize is logged but not fatal.
 */
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
	int old_vfs = pci_num_vf(adapter->pdev);
	u16 num_vf_qs;
	int status;

	be_get_sriov_config(adapter);

	/* Advertise the supported VF count to the PCI core */
	if (!old_vfs)
		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));

	/* When the HW is in SRIOV capable configuration, the PF-pool
	 * resources are given to PF during driver load, if there are no
	 * old VFs. This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
		num_vf_qs = be_calculate_vf_qs(adapter, 0);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
						 num_vf_qs);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Failed to optimize SRIOV resources\n");
	}
}
4152
/* Populate adapter->res with per-function resource limits: driver-computed
 * on BE2/BE3, queried from FW on Lancer/Skyhawk. Also derives
 * need_def_rxq and sanitizes cfg_num_qs.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4203
/* Query controller attributes, FW config, WoL capability, port name and
 * resource limits from FW; allocates the pmac_id table sized to the
 * max unicast MAC count. Returns 0 or a negative/FW error status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status, level;
	u16 profile_id;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* On BE2/BE3, derive the msglvl from the FW log level */
	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	be_cmd_get_acpi_wol_cap(adapter);

	be_cmd_query_port_name(adapter);

	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	return 0;
}
4245
Sathya Perla95046b92013-07-23 15:25:02 +05304246static int be_mac_setup(struct be_adapter *adapter)
4247{
4248 u8 mac[ETH_ALEN];
4249 int status;
4250
4251 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4252 status = be_cmd_get_perm_mac(adapter, mac);
4253 if (status)
4254 return status;
4255
4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304258 }
4259
Sathya Perla95046b92013-07-23 15:25:02 +05304260 return 0;
4261}
4262
/* Schedule the periodic worker to run in 1 sec and record that fact */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4268
/* Schedule the error-detection task to run in 1 sec and record that fact */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4275
/* Create all EQs, TXQs, RX-CQs and MCC queues, then publish the real
 * RX/TX queue counts to the net stack. On failure logs the error and
 * returns the status; no partial teardown is done here.
 */
static int be_setup_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	/* EQs must exist first; the other queues attach to them */
	status = be_evt_queues_create(adapter);
	if (status)
		goto err;

	status = be_tx_qs_create(adapter);
	if (status)
		goto err;

	status = be_rx_cqs_create(adapter);
	if (status)
		goto err;

	status = be_mcc_queues_create(adapter);
	if (status)
		goto err;

	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
	if (status)
		goto err;

	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
	if (status)
		goto err;

	return 0;
err:
	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
	return status;
}
4310
/* Tear down and re-create all queues (used when queue configuration
 * changes); closes the netdev first if it is running and re-opens it at
 * the end. MSI-X is re-programmed only when no vectors are shared with
 * RoCE.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4346
/* Return the major number parsed from a "major.minor..." FW version
 * string, or 0 if the string does not begin with a decimal number.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	return sscanf(fw_ver, "%d.", &major) == 1 ? major : 0;
}
4357
Sathya Perlaf962f842015-02-23 04:20:16 -05004358/* If any VFs are already enabled don't FLR the PF */
4359static bool be_reset_required(struct be_adapter *adapter)
4360{
4361 return pci_num_vf(adapter->pdev) ? false : true;
4362}
4363
/* Wait for the FW to be ready and perform the required initialization:
 * optional function-level reset (FLR), FW init handshake and enabling of
 * interrupts for co-resident ULPs.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* Skip the FLR when VFs are still enabled (see be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4395
/* Main device-setup path: brings the FW to a usable state, queries the
 * resource configuration, creates the interface and all queues, programs
 * MAC and flow control, sets up VFs when requested and finally starts
 * the periodic worker. On any failure everything is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* Carve out SRIOV resources before querying our own limits */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* BE2 FW older than 4.0 is known to have IRQ problems */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If setting the requested flow control fails, read back what HW
	 * actually applied so our state stays accurate
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4479
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll-controller hook: notify every event queue and schedule its NAPI
 * context so pending completions are processed without an interrupt.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4493
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304494static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004495
Sathya Perla306f1342011-08-02 19:57:45 +00004496static bool phy_flashing_required(struct be_adapter *adapter)
4497{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004498 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004499 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004500}
4501
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004502static bool is_comp_in_ufi(struct be_adapter *adapter,
4503 struct flash_section_info *fsec, int type)
4504{
4505 int i = 0, img_type = 0;
4506 struct flash_section_info_g2 *fsec_g2 = NULL;
4507
Sathya Perlaca34fe32012-11-06 17:48:56 +00004508 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004509 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4510
4511 for (i = 0; i < MAX_FLASH_COMP; i++) {
4512 if (fsec_g2)
4513 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4514 else
4515 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4516
4517 if (img_type == type)
4518 return true;
4519 }
4520 return false;
4521
4522}
4523
Jingoo Han4188e7d2013-08-05 18:02:02 +09004524static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304525 int header_size,
4526 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004527{
4528 struct flash_section_info *fsec = NULL;
4529 const u8 *p = fw->data;
4530
4531 p += header_size;
4532 while (p < (fw->data + fw->size)) {
4533 fsec = (struct flash_section_info *)p;
4534 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4535 return fsec;
4536 p += 32;
4537 }
4538 return NULL;
4539}
4540
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304541static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4542 u32 img_offset, u32 img_size, int hdr_size,
4543 u16 img_optype, bool *crc_match)
4544{
4545 u32 crc_offset;
4546 int status;
4547 u8 crc[4];
4548
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004549 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4550 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304551 if (status)
4552 return status;
4553
4554 crc_offset = hdr_size + img_offset + img_size - 4;
4555
4556 /* Skip flashing, if crc of flashed region matches */
4557 if (!memcmp(crc, p + crc_offset, 4))
4558 *crc_match = true;
4559 else
4560 *crc_match = false;
4561
4562 return status;
4563}
4564
/* Write one image to flash in 32KB chunks through the flash_cmd DMA
 * buffer. All chunks but the last use a SAVE op; the final chunk uses a
 * FLASH op. For PHY FW an ILLEGAL_REQUEST status aborts without error.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		/* Chunk size is capped at 32KB */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk: commit with a FLASH op; otherwise SAVE */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4605
/* For BE2, BE3 and BE3-R: walk the per-generation table of flash
 * components, skip those absent from the UFI, not applicable, or whose
 * flashed CRC already matches, and flash the rest via be_flash().
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Flash layout for BE3 (gen3) images: offset, optype, max size,
	 * image type
	 */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Flash layout for BE2 (gen2) images */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW component needs on-card FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Skip re-flashing the boot code when its CRC is unchanged */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds-check the component against the FW file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4723
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304724static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4725{
4726 u32 img_type = le32_to_cpu(fsec_entry.type);
4727 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4728
4729 if (img_optype != 0xFFFF)
4730 return img_optype;
4731
4732 switch (img_type) {
4733 case IMAGE_FIRMWARE_iSCSI:
4734 img_optype = OPTYPE_ISCSI_ACTIVE;
4735 break;
4736 case IMAGE_BOOT_CODE:
4737 img_optype = OPTYPE_REDBOOT;
4738 break;
4739 case IMAGE_OPTION_ROM_ISCSI:
4740 img_optype = OPTYPE_BIOS;
4741 break;
4742 case IMAGE_OPTION_ROM_PXE:
4743 img_optype = OPTYPE_PXE_BIOS;
4744 break;
4745 case IMAGE_OPTION_ROM_FCoE:
4746 img_optype = OPTYPE_FCOE_BIOS;
4747 break;
4748 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4749 img_optype = OPTYPE_ISCSI_BACKUP;
4750 break;
4751 case IMAGE_NCSI:
4752 img_optype = OPTYPE_NCSI_FW;
4753 break;
4754 case IMAGE_FLASHISM_JUMPVECTOR:
4755 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4756 break;
4757 case IMAGE_FIRMWARE_PHY:
4758 img_optype = OPTYPE_SH_PHY_FW;
4759 break;
4760 case IMAGE_REDBOOT_DIR:
4761 img_optype = OPTYPE_REDBOOT_DIR;
4762 break;
4763 case IMAGE_REDBOOT_CONFIG:
4764 img_optype = OPTYPE_REDBOOT_CONFIG;
4765 break;
4766 case IMAGE_UFI_DIR:
4767 img_optype = OPTYPE_UFI_DIR;
4768 break;
4769 default:
4770 break;
4771 }
4772
4773 return img_optype;
4774}
4775
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004776static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304777 const struct firmware *fw,
4778 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004779{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004780 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004781 bool crc_match, old_fw_img, flash_offset_support = true;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304782 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004783 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304784 u32 img_offset, img_size, img_type;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004785 u16 img_optype, flash_optype;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304786 int status, i, filehdr_size;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304787 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004788
4789 filehdr_size = sizeof(struct flash_file_hdr_g3);
4790 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4791 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304792 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304793 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004794 }
4795
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004796retry_flash:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004797 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4798 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4799 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304800 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4801 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4802 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004803
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304804 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004805 continue;
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004806
4807 if (flash_offset_support)
4808 flash_optype = OPTYPE_OFFSET_SPECIFIED;
4809 else
4810 flash_optype = img_optype;
4811
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304812 /* Don't bother verifying CRC if an old FW image is being
4813 * flashed
4814 */
4815 if (old_fw_img)
4816 goto flash;
4817
4818 status = be_check_flash_crc(adapter, fw->data, img_offset,
4819 img_size, filehdr_size +
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004820 img_hdrs_size, flash_optype,
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304821 &crc_match);
Kalesh AP4c600052014-05-30 19:06:26 +05304822 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4823 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004824 /* The current FW image on the card does not support
4825 * OFFSET based flashing. Retry using older mechanism
4826 * of OPTYPE based flashing
4827 */
4828 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4829 flash_offset_support = false;
4830 goto retry_flash;
4831 }
4832
4833 /* The current FW image on the card does not recognize
4834 * the new FLASH op_type. The FW download is partially
4835 * complete. Reboot the server now to enable FW image
4836 * to recognize the new FLASH op_type. To complete the
4837 * remaining process, download the same FW again after
4838 * the reboot.
4839 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304840 dev_err(dev, "Flash incomplete. Reset the server\n");
4841 dev_err(dev, "Download FW image again after reset\n");
4842 return -EAGAIN;
4843 } else if (status) {
4844 dev_err(dev, "Could not get CRC for 0x%x region\n",
4845 img_optype);
4846 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004847 }
4848
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304849 if (crc_match)
4850 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004851
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304852flash:
4853 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004854 if (p + img_size > fw->data + fw->size)
4855 return -1;
4856
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004857 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
4858 img_offset);
4859
4860 /* The current FW image on the card does not support OFFSET
4861 * based flashing. Retry using older mechanism of OPTYPE based
4862 * flashing
4863 */
4864 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
4865 flash_optype == OPTYPE_OFFSET_SPECIFIED) {
4866 flash_offset_support = false;
4867 goto retry_flash;
4868 }
4869
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304870 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4871 * UFI_DIR region
4872 */
Kalesh AP4c600052014-05-30 19:06:26 +05304873 if (old_fw_img &&
4874 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4875 (img_optype == OPTYPE_UFI_DIR &&
4876 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304877 continue;
4878 } else if (status) {
4879 dev_err(dev, "Flashing section type 0x%x failed\n",
4880 img_type);
4881 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004882 }
4883 }
4884 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004885}
4886
/* Download a FW image to a Lancer adapter.
 *
 * The image is streamed to the "/prg" flash object in 32KB chunks via
 * WRITE_OBJECT commands using a single DMA-coherent buffer, then committed
 * with a zero-length write. Depending on the change_status reported by FW,
 * the adapter is either reset in-place to activate the new image or the
 * user is told a server reboot is required.
 *
 * Returns 0 on success (even if the activation reset fails — the flash
 * itself succeeded), -EINVAL/-ENOMEM on validation/allocation failure, or
 * a be_cmd_status() translated error on a failed write.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW expects the image as a sequence of 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One buffer holds the WRITE_OBJECT request header followed by the
	 * chunk payload; it is reused for every chunk.
	 */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
					   &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what FW actually consumed, which may be less
		 * than the chunk that was submitted.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* FW says an in-place function reset activates the image */
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4971
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004972/* Check if the flash image file is compatible with the adapter that
4973 * is being flashed.
4974 */
4975static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4976 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004977{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004978 if (!fhdr) {
4979 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4980 return -1;
4981 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004982
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004983 /* First letter of the build version is used to identify
4984 * which chip this image file is meant for.
4985 */
4986 switch (fhdr->build[0]) {
4987 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004988 if (!skyhawk_chip(adapter))
4989 return false;
4990 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004991 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004992 if (!BE3_chip(adapter))
4993 return false;
4994 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004995 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004996 if (!BE2_chip(adapter))
4997 return false;
4998 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004999 default:
5000 return false;
5001 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04005002
5003 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00005004}
5005
/* Download a FW image to a BE2/BE3/Skyhawk adapter (non-Lancer path).
 *
 * Validates the UFI file against the adapter, then walks the per-chip
 * image headers and flashes each applicable image via the WRITE_FLASHROM
 * command using a shared DMA buffer.
 *
 * Returns 0 on success, -EINVAL for an incompatible image, -ENOMEM on
 * allocation failure, or the status of the last flash attempt.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					   GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On BE3/Skyhawk, only the image with id 1 is flashed;
		 * BE2 flashes every image in the file.
		 */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
5049
5050int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
5051{
5052 const struct firmware *fw;
5053 int status;
5054
5055 if (!netif_running(adapter->netdev)) {
5056 dev_err(&adapter->pdev->dev,
5057 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05305058 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005059 }
5060
5061 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5062 if (status)
5063 goto fw_exit;
5064
5065 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5066
5067 if (lancer_chip(adapter))
5068 status = lancer_fw_download(adapter, fw);
5069 else
5070 status = be_fw_download(adapter, fw);
5071
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005072 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05305073 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005074
Ajit Khaparde84517482009-09-04 03:12:16 +00005075fw_exit:
5076 release_firmware(fw);
5077 return status;
5078}
5079
/* ndo_bridge_setlink handler: program the embedded switch forwarding mode
 * (VEB or VEPA) from an IFLA_BRIDGE_MODE netlink attribute.
 *
 * Only the first IFLA_BRIDGE_MODE attribute found is acted upon; the
 * function returns from inside the loop after handling it. If no such
 * attribute is present, control falls through to the err label with
 * status == 0 (an error is logged but 0 is returned).
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	/* Switch-mode config is meaningful only when SR-IOV is active */
	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Reject truncated attributes before reading the u16 */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		return status;
	}
err:
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
5126
/* ndo_bridge_getlink handler: report the embedded switch forwarding mode
 * (VEB or VEPA) via the default bridge-getlink netlink fill helper.
 * Returns 0 (nothing filled) when SR-IOV is off or the HSW query fails.
 */
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				 struct net_device *dev, u32 filter_mask,
				 int nlflags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	int status = 0;
	u8 hsw_mode;

	if (!sriov_enabled(adapter))
		return 0;

	/* BE and Lancer chips support VEB mode only */
	if (BEx_chip(adapter) || lancer_chip(adapter)) {
		hsw_mode = PORT_FWD_TYPE_VEB;
	} else {
		/* Skyhawk: query the current mode from FW */
		status = be_cmd_get_hsw_config(adapter, NULL, 0,
					       adapter->if_handle, &hsw_mode,
					       NULL);
		if (status)
			return 0;
	}

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
				       hsw_mode == PORT_FWD_TYPE_VEPA ?
				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
				       0, 0, nlflags, filter_mask, NULL);
}
5154
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305155#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005156/* VxLAN offload Notes:
5157 *
5158 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5159 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5160 * is expected to work across all types of IP tunnels once exported. Skyhawk
5161 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305162 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5163 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5164 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005165 *
5166 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5167 * adds more than one port, disable offloads and don't re-enable them again
5168 * until after all the tunnels are removed.
5169 */
/* ndo_add_vxlan_port handler: enable VxLAN offloads for the first UDP
 * port added by the stack (Skyhawk only — see the VxLAN notes above the
 * function in the file).
 *
 * vxlan_port_count tracks every port the stack has added, including ones
 * that were rejected, so offloads stay disabled until all ports are gone.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Only Skyhawk-R supports VxLAN offloads */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		/* HW supports one offloaded UDP dport; a second port forces
		 * offloads off until the count drops back.
		 */
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* A port beyond the first while offloads are already off: count it */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Advertise tunnel offload capabilities now that they are active */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5218
5219static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5220 __be16 port)
5221{
5222 struct be_adapter *adapter = netdev_priv(netdev);
5223
5224 if (lancer_chip(adapter) || BEx_chip(adapter))
5225 return;
5226
5227 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005228 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305229
5230 be_disable_vxlan_offloads(adapter);
5231
5232 dev_info(&adapter->pdev->dev,
5233 "Disabled VxLAN offloads for UDP port %d\n",
5234 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005235done:
5236 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305237}
Joe Stringer725d5482014-11-13 16:38:13 -08005238
/* ndo_features_check handler: withdraw checksum/GSO offloads for
 * encapsulated packets that are not VxLAN, since Skyhawk's tunnel
 * offloads were enabled specifically for VxLAN (see the notes above
 * be_add_vxlan_port).
 */
static netdev_features_t be_features_check(struct sk_buff *skb,
					   struct net_device *dev,
					   netdev_features_t features)
{
	struct be_adapter *adapter = netdev_priv(dev);
	u8 l4_hdr = 0;

	/* The code below restricts offload features for some tunneled packets.
	 * Offload features for normal (non tunnel) packets are unchanged.
	 */
	if (!skb->encapsulation ||
	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
		return features;

	/* It's an encapsulated packet and VxLAN offloads are enabled. We
	 * should disable tunnel offload features if it's not a VxLAN packet,
	 * as tunnel offloads have been enabled only for VxLAN. This is done to
	 * allow other tunneled traffic like GRE work fine while VxLAN
	 * offloads are configured in Skyhawk-R.
	 */
	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		l4_hdr = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		l4_hdr = ipv6_hdr(skb)->nexthdr;
		break;
	default:
		/* Non-IP tunnel: leave features alone */
		return features;
	}

	/* VxLAN = UDP outer header + Ethernet (TEB) inner frame with the
	 * inner MAC header exactly one UDP+VxLAN header past the transport
	 * header. Anything else loses checksum and GSO offloads.
	 */
	if (l4_hdr != IPPROTO_UDP ||
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
		return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);

	return features;
}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305279#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305280
/* Netdev entry points for be2net. Optional hooks (netpoll, busy-poll,
 * VxLAN port notifications and the features_check filter) are present
 * only when the corresponding kernel config options are enabled.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF management hooks */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	/* Embedded-switch (VEB/VEPA) configuration */
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
};
5312
/* One-time net_device initialization: advertise offload features,
 * set device flags, and install the netdev/ethtool operation tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable everything by default; VLAN RX strip/filter are always on */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW can filter unicast addresses; no need for promiscuous mode */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5339
/* Quiesce the adapter for error recovery or suspend: detach and close the
 * netdev (under RTNL, as be_close requires) and release all HW resources.
 * The inverse operation is be_resume().
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5352
/* Re-initialize the adapter after be_cleanup(): recreate HW resources,
 * reopen the interface if it was running, and reattach the netdev.
 * Returns 0 on success or the first failing step's status.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	/* Only reopen if the interface was up before cleanup */
	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}
5372
5373static int be_err_recover(struct be_adapter *adapter)
5374{
5375 struct device *dev = &adapter->pdev->dev;
5376 int status;
5377
5378 status = be_resume(adapter);
5379 if (status)
5380 goto err;
5381
Sathya Perla9fa465c2015-02-23 04:20:13 -05005382 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005383 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005384err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005385 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305386 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005387 else
5388 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005389
5390 return status;
5391}
5392
/* Delayed-work handler that polls for HW errors and drives recovery.
 * On a detected HW error the adapter is quiesced; actual recovery is
 * attempted only on Lancer. The work re-arms itself unless a PF's
 * recovery attempt failed (VFs always re-arm).
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5414
Vasundhara Volam21252372015-02-06 08:18:42 -05005415static void be_log_sfp_info(struct be_adapter *adapter)
5416{
5417 int status;
5418
5419 status = be_cmd_query_sfp_info(adapter);
5420 if (!status) {
5421 dev_err(&adapter->pdev->dev,
5422 "Unqualified SFP+ detected on %c from %s part no: %s",
5423 adapter->port_name, adapter->phy.vendor_name,
5424 adapter->phy.vendor_pn);
5425 }
5426 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5427}
5428
/* Periodic (1s) housekeeping work: reap MCC completions, refresh stats
 * and die temperature, replenish starved RX queues, update EQ delays
 * (non-Skyhawk), and log pending incompatible-SFP events.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Kick off a new stats query only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die-temperature query is rate-limited to every be_get_temp_freq
	 * iterations, and only on the PF.
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5477
Sathya Perla78fad34e2015-02-23 04:20:08 -05005478static void be_unmap_pci_bars(struct be_adapter *adapter)
5479{
5480 if (adapter->csr)
5481 pci_iounmap(adapter->pdev, adapter->csr);
5482 if (adapter->db)
5483 pci_iounmap(adapter->pdev, adapter->db);
5484}
5485
/* Return the PCI BAR number that holds the doorbell region:
 * BAR 0 on Lancer chips and on all VFs, BAR 4 otherwise.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5493
5494static int be_roce_map_pci_bars(struct be_adapter *adapter)
5495{
5496 if (skyhawk_chip(adapter)) {
5497 adapter->roce_db.size = 4096;
5498 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5499 db_bar(adapter));
5500 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5501 db_bar(adapter));
5502 }
5503 return 0;
5504}
5505
5506static int be_map_pci_bars(struct be_adapter *adapter)
5507{
David S. Miller0fa74a42015-03-20 18:51:09 -04005508 struct pci_dev *pdev = adapter->pdev;
Sathya Perla78fad34e2015-02-23 04:20:08 -05005509 u8 __iomem *addr;
5510 u32 sli_intf;
5511
5512 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
5513 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5514 SLI_INTF_FAMILY_SHIFT;
5515 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5516
5517 if (BEx_chip(adapter) && be_physfn(adapter)) {
David S. Miller0fa74a42015-03-20 18:51:09 -04005518 adapter->csr = pci_iomap(pdev, 2, 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005519 if (!adapter->csr)
5520 return -ENOMEM;
5521 }
5522
David S. Miller0fa74a42015-03-20 18:51:09 -04005523 addr = pci_iomap(pdev, db_bar(adapter), 0);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005524 if (!addr)
5525 goto pci_map_err;
5526 adapter->db = addr;
5527
David S. Miller0fa74a42015-03-20 18:51:09 -04005528 if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5529 if (be_physfn(adapter)) {
5530 /* PCICFG is the 2nd BAR in BE2 */
5531 addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5532 if (!addr)
5533 goto pci_map_err;
5534 adapter->pcicfg = addr;
5535 } else {
5536 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5537 }
5538 }
5539
Sathya Perla78fad34e2015-02-23 04:20:08 -05005540 be_roce_map_pci_bars(adapter);
5541 return 0;
5542
5543pci_map_err:
David S. Miller0fa74a42015-03-20 18:51:09 -04005544 dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla78fad34e2015-02-23 04:20:08 -05005545 be_unmap_pci_bars(adapter);
5546 return -ENOMEM;
5547}
5548
5549static void be_drv_cleanup(struct be_adapter *adapter)
5550{
5551 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5552 struct device *dev = &adapter->pdev->dev;
5553
5554 if (mem->va)
5555 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5556
5557 mem = &adapter->rx_filter;
5558 if (mem->va)
5559 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5560
5561 mem = &adapter->stats_cmd;
5562 if (mem->va)
5563 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5564}
5565
/* Allocate and initialize various fields in be_adapter struct.
 * Sets up the coherent DMA buffers used for FW mailbox commands, RX
 * filtering and statistics, plus the locks, work items and defaults the
 * driver relies on. Returns 0 on success or -ENOMEM; on failure all
 * buffers allocated so far are freed via the goto-cleanup chain.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed.
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* Stats request size depends on chip generation/FW interface */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	/* Flow control defaults: both directions enabled */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5636
/* PCI remove callback: tears down the device in the reverse order of
 * be_probe(). The ordering below is deliberate — RoCE and interrupts go
 * first, the netdev is unregistered before HW state is cleared, and the
 * FW is told we are done before the BARs are unmapped.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* shutdown may have already released drvdata */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	/* Stop the error-detection worker before dismantling state */
	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
5666
Arnd Bergmann9a032592015-05-18 23:06:45 +02005667static ssize_t be_hwmon_show_temp(struct device *dev,
5668 struct device_attribute *dev_attr,
5669 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305670{
5671 struct be_adapter *adapter = dev_get_drvdata(dev);
5672
5673 /* Unit: millidegree Celsius */
5674 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5675 return -EIO;
5676 else
5677 return sprintf(buf, "%u\n",
5678 adapter->hwmon_info.be_on_die_temp * 1000);
5679}
5680
/* Expose the cached die temperature as the standard read-only hwmon
 * attribute "temp1_input"; registered via be_hwmon_groups in be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL	/* sentinel */
};

ATTRIBUTE_GROUPS(be_hwmon);
5690
Sathya Perlad3791422012-09-28 04:39:44 +00005691static char *mc_name(struct be_adapter *adapter)
5692{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305693 char *str = ""; /* default */
5694
5695 switch (adapter->mc_type) {
5696 case UMC:
5697 str = "UMC";
5698 break;
5699 case FLEX10:
5700 str = "FLEX10";
5701 break;
5702 case vNIC1:
5703 str = "vNIC-1";
5704 break;
5705 case nPAR:
5706 str = "nPAR";
5707 break;
5708 case UFP:
5709 str = "UFP";
5710 break;
5711 case vNIC2:
5712 str = "vNIC-2";
5713 break;
5714 default:
5715 str = "";
5716 }
5717
5718 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005719}
5720
/* Short label for log messages: "PF" for a physical function, "VF"
 * otherwise.
 */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5725
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005726static inline char *nic_name(struct pci_dev *pdev)
5727{
5728 switch (pdev->device) {
5729 case OC_DEVICE_ID1:
5730 return OC_NAME;
5731 case OC_DEVICE_ID2:
5732 return OC_NAME_BE;
5733 case OC_DEVICE_ID3:
5734 case OC_DEVICE_ID4:
5735 return OC_NAME_LANCER;
5736 case BE_DEVICE_ID2:
5737 return BE3_NAME;
5738 case OC_DEVICE_ID5:
5739 case OC_DEVICE_ID6:
5740 return OC_NAME_SH;
5741 default:
5742 return BE_NAME;
5743 }
5744}
5745
Greg Kroah-Hartman1dd06ae2012-12-06 14:30:56 +00005746static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005747{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005748 struct be_adapter *adapter;
5749 struct net_device *netdev;
Vasundhara Volam21252372015-02-06 08:18:42 -05005750 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005751
Sathya Perlaacbafeb2014-09-02 09:56:46 +05305752 dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);
5753
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005754 status = pci_enable_device(pdev);
5755 if (status)
5756 goto do_none;
5757
5758 status = pci_request_regions(pdev, DRV_NAME);
5759 if (status)
5760 goto disable_dev;
5761 pci_set_master(pdev);
5762
Sathya Perla7f640062012-06-05 19:37:20 +00005763 netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
Kalesh APddf11692014-07-17 16:20:28 +05305764 if (!netdev) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005765 status = -ENOMEM;
5766 goto rel_reg;
5767 }
5768 adapter = netdev_priv(netdev);
5769 adapter->pdev = pdev;
5770 pci_set_drvdata(pdev, adapter);
5771 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005772 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005773
Russell King4c15c242013-06-26 23:49:11 +01005774 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005775 if (!status) {
5776 netdev->features |= NETIF_F_HIGHDMA;
5777 } else {
Russell King4c15c242013-06-26 23:49:11 +01005778 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005779 if (status) {
5780 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5781 goto free_netdev;
5782 }
5783 }
5784
Kalesh AP2f951a92014-09-12 17:39:21 +05305785 status = pci_enable_pcie_error_reporting(pdev);
5786 if (!status)
5787 dev_info(&pdev->dev, "PCIe error reporting enabled\n");
Sathya Perlad6b6d982012-09-05 01:56:48 +00005788
Sathya Perla78fad34e2015-02-23 04:20:08 -05005789 status = be_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005790 if (status)
Sathya Perla39f1d942012-05-08 19:41:24 +00005791 goto free_netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005792
Sathya Perla78fad34e2015-02-23 04:20:08 -05005793 status = be_drv_init(adapter);
5794 if (status)
5795 goto unmap_bars;
5796
Sathya Perla5fb379e2009-06-18 00:02:59 +00005797 status = be_setup(adapter);
5798 if (status)
Sathya Perla78fad34e2015-02-23 04:20:08 -05005799 goto drv_cleanup;
Sathya Perla2243e2e2009-11-22 22:02:03 +00005800
Sathya Perla3abcded2010-10-03 22:12:27 -07005801 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005802 status = register_netdev(netdev);
5803 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00005804 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005805
Parav Pandit045508a2012-03-26 14:27:13 +00005806 be_roce_dev_add(adapter);
5807
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005808 be_schedule_err_detection(adapter);
Padmanabh Ratnakarb4e32a72012-07-12 03:57:35 +00005809
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305810 /* On Die temperature not supported for VF. */
Arnd Bergmann9a032592015-05-18 23:06:45 +02005811 if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305812 adapter->hwmon_info.hwmon_dev =
5813 devm_hwmon_device_register_with_groups(&pdev->dev,
5814 DRV_NAME,
5815 adapter,
5816 be_hwmon_groups);
5817 adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5818 }
5819
Sathya Perlad3791422012-09-28 04:39:44 +00005820 dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
Vasundhara Volam21252372015-02-06 08:18:42 -05005821 func_name(adapter), mc_name(adapter), adapter->port_name);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00005822
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005823 return 0;
5824
Sathya Perla5fb379e2009-06-18 00:02:59 +00005825unsetup:
5826 be_clear(adapter);
Sathya Perla78fad34e2015-02-23 04:20:08 -05005827drv_cleanup:
5828 be_drv_cleanup(adapter);
5829unmap_bars:
5830 be_unmap_pci_bars(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00005831free_netdev:
Sathya Perlafe6d2a32010-11-21 23:25:50 +00005832 free_netdev(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005833rel_reg:
5834 pci_release_regions(pdev);
5835disable_dev:
5836 pci_disable_device(pdev);
5837do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07005838 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005839 return status;
5840}
5841
/* Legacy PM suspend callback: optionally arm wake-on-LAN, stop the
 * error-detection worker, quiesce the device, then power down the PCI
 * function. Mirrored by be_pci_resume(). Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5859
Kalesh AP484d76f2015-02-23 04:20:14 -05005860static int be_pci_resume(struct pci_dev *pdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005861{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005862 struct be_adapter *adapter = pci_get_drvdata(pdev);
Kalesh AP484d76f2015-02-23 04:20:14 -05005863 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005864
5865 status = pci_enable_device(pdev);
5866 if (status)
5867 return status;
5868
Yijing Wang1ca01512013-06-27 20:53:42 +08005869 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005870 pci_restore_state(pdev);
5871
Kalesh AP484d76f2015-02-23 04:20:14 -05005872 status = be_resume(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005873 if (status)
5874 return status;
5875
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005876 be_schedule_err_detection(adapter);
5877
Suresh Reddy76a9e082014-01-15 13:23:40 +05305878 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005879 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005880
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005881 return 0;
5882}
5883
Sathya Perla82456b02010-02-17 01:35:37 +00005884/*
5885 * An FLR will stop BE from DMAing any data.
5886 */
5887static void be_shutdown(struct pci_dev *pdev)
5888{
5889 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005890
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005891 if (!adapter)
5892 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005893
Devesh Sharmad114f992014-06-10 19:32:15 +05305894 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005895 cancel_delayed_work_sync(&adapter->work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005896 be_cancel_err_detection(adapter);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005897
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005898 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005899
Ajit Khaparde57841862011-04-06 18:08:43 +00005900 be_cmd_reset_function(adapter);
5901
Sathya Perla82456b02010-02-17 01:35:37 +00005902 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005903}
5904
Sathya Perlacf588472010-02-14 21:22:01 +00005905static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05305906 pci_channel_state_t state)
Sathya Perlacf588472010-02-14 21:22:01 +00005907{
5908 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perlacf588472010-02-14 21:22:01 +00005909
5910 dev_err(&adapter->pdev->dev, "EEH error detected\n");
5911
Venkata Duvvuru954f6822015-05-13 13:00:13 +05305912 if (!be_check_error(adapter, BE_ERROR_EEH)) {
5913 be_set_error(adapter, BE_ERROR_EEH);
Sathya Perlacf588472010-02-14 21:22:01 +00005914
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005915 be_cancel_err_detection(adapter);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005916
Kalesh AP87ac1a52015-02-23 04:20:15 -05005917 be_cleanup(adapter);
Sathya Perlacf588472010-02-14 21:22:01 +00005918 }
Sathya Perlacf588472010-02-14 21:22:01 +00005919
5920 if (state == pci_channel_io_perm_failure)
5921 return PCI_ERS_RESULT_DISCONNECT;
5922
5923 pci_disable_device(pdev);
5924
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005925 /* The error could cause the FW to trigger a flash debug dump.
5926 * Resetting the card while flash dump is in progress
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005927 * can cause it not to recover; wait for it to finish.
5928 * Wait only for first function as it is needed only once per
5929 * adapter.
Somnath Kotureeb7fc72012-05-02 03:41:01 +00005930 */
Padmanabh Ratnakarc8a54162012-10-20 06:03:37 +00005931 if (pdev->devfn == 0)
5932 ssleep(30);
5933
Sathya Perlacf588472010-02-14 21:22:01 +00005934 return PCI_ERS_RESULT_NEED_RESET;
5935}
5936
/* EEH slot_reset callback: re-enable the PCI function after the slot
 * reset, wait for FW readiness and clear the driver's error state.
 * Returns RECOVERED on success so be_eeh_resume() runs next, or
 * DISCONNECT if the device cannot be brought back.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5963
/* EEH resume callback: final recovery step after a successful slot
 * reset — re-initialize the adapter and restart error detection. A
 * failure here is only logged; EEH offers no further recovery.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5982
Vasundhara Volamace40af2015-03-04 00:44:34 -05005983static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
5984{
5985 struct be_adapter *adapter = pci_get_drvdata(pdev);
5986 u16 num_vf_qs;
5987 int status;
5988
5989 if (!num_vfs)
5990 be_vf_clear(adapter);
5991
5992 adapter->num_vfs = num_vfs;
5993
5994 if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
5995 dev_warn(&pdev->dev,
5996 "Cannot disable VFs while they are assigned\n");
5997 return -EBUSY;
5998 }
5999
6000 /* When the HW is in SRIOV capable configuration, the PF-pool resources
6001 * are equally distributed across the max-number of VFs. The user may
6002 * request only a subset of the max-vfs to be enabled.
6003 * Based on num_vfs, redistribute the resources across num_vfs so that
6004 * each VF will have access to more number of resources.
6005 * This facility is not available in BE3 FW.
6006 * Also, this is done by FW in Lancer chip.
6007 */
6008 if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
6009 num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
6010 status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
6011 adapter->num_vfs, num_vf_qs);
6012 if (status)
6013 dev_err(&pdev->dev,
6014 "Failed to optimize SR-IOV resources\n");
6015 }
6016
6017 status = be_get_resources(adapter);
6018 if (status)
6019 return be_cmd_status(status);
6020
6021 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
6022 rtnl_lock();
6023 status = be_update_queues(adapter);
6024 rtnl_unlock();
6025 if (status)
6026 return be_cmd_status(status);
6027
6028 if (adapter->num_vfs)
6029 status = be_vf_setup(adapter);
6030
6031 if (!status)
6032 return adapter->num_vfs;
6033
6034 return 0;
6035}
6036
Stephen Hemminger3646f0e2012-09-07 09:33:15 -07006037static const struct pci_error_handlers be_eeh_handlers = {
Sathya Perlacf588472010-02-14 21:22:01 +00006038 .error_detected = be_eeh_err_detected,
6039 .slot_reset = be_eeh_reset,
6040 .resume = be_eeh_resume,
6041};
6042
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006043static struct pci_driver be_driver = {
6044 .name = DRV_NAME,
6045 .id_table = be_dev_ids,
6046 .probe = be_probe,
6047 .remove = be_remove,
6048 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05006049 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00006050 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05006051 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00006052 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006053};
6054
6055static int __init be_init_module(void)
6056{
Joe Perches8e95a202009-12-03 07:58:21 +00006057 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6058 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006059 printk(KERN_WARNING DRV_NAME
6060 " : Module param rx_frag_size must be 2048/4096/8192."
6061 " Using 2048\n");
6062 rx_frag_size = 2048;
6063 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006064
Vasundhara Volamace40af2015-03-04 00:44:34 -05006065 if (num_vfs > 0) {
6066 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6067 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6068 }
6069
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006070 return pci_register_driver(&be_driver);
6071}
6072module_init(be_init_module);
6073
/* Module exit point: unregister the PCI driver; per-device teardown
 * then runs through be_remove().
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);