blob: 14ae67a8949e3979fd40daa68f5005495bd916a6 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volamd19261b2015-05-06 05:30:39 -04002 * Copyright (C) 2005 - 2015 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

/* num_vfs module param is obsolete.
 * Use sysfs method to enable/disable VFs.
 */
static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

/* Size of the buffer fragments used for received data; settable only at
 * module load time (read-only in sysfs).
 */
static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

/* PCI device IDs claimed by this driver */
static const struct pci_device_id be_dev_ids[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE (Unrecoverable Error) Status Low CSR: h/w block name for each bit
 * position of the register.  (Bit-to-index mapping assumed from the table
 * layout — the consuming code is outside this chunk.)  Trailing spaces in
 * some entries are intentional runtime data; do not "clean them up".
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053091
/* UE (Unrecoverable Error) Status High CSR: h/w block name for each bit
 * position of the register; "Unknown" is the final catch-all entry.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{
130 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530131
Sathya Perla1cfafab2012-02-23 18:50:15 +0000132 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000133 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000135 mem->va = NULL;
136 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700137}
138
139static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530140 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700141{
142 struct be_dma_mem *mem = &q->dma_mem;
143
144 memset(q, 0, sizeof(*q));
145 q->len = len;
146 q->entry_size = entry_size;
147 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700148 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000151 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700152 return 0;
153}
154
/* Enable/disable host interrupt delivery by flipping the HOSTINTR bit of
 * the MEMBAR_CTRL_INT_CTRL register, accessed through PCI config space.
 * No-op when the register already reflects the requested state.
 * Used as the fallback path by be_intr_set() when the INTR_SET f/w cmd
 * fails.
 */
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
	u32 reg, enabled;

	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
			      &reg);
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	pci_write_config_dword(adapter->pdev,
			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
173
Somnath Kotur68c45a22013-03-14 02:42:07 +0000174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
Venkata Duvvuru954f6822015-05-13 13:00:13 +0530182 if (be_check_error(adapter, BE_ERROR_EEH))
Somnath Kotur68c45a22013-03-14 02:42:07 +0000183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
/* Notify h/w that @posted new buffers were added to RX queue @qid by
 * ringing the RQ doorbell.  Skipped once a h/w error has been detected,
 * as the device can no longer be trusted.
 */
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	/* make the descriptor writes visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
203
/* Publish @posted new TX descriptors on queue @txo by ringing the TXULP
 * doorbell at the queue's own doorbell offset.  Skipped once a h/w error
 * has been detected.
 */
static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
			  u16 posted)
{
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	/* make the descriptor writes visible before the doorbell write */
	wmb();
	iowrite32(val, adapter->db + txo->db_offset);
}
218
/* Ring the EQ doorbell for queue @qid: optionally re-arm the EQ (@arm),
 * clear the interrupt (@clear_int) and acknowledge @num_popped processed
 * event entries.  @eq_delay_mult_enc is the encoded interrupt-delay
 * multiplier written into the R2I delay field.  Skipped once a h/w error
 * has been detected.
 */
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
			 bool arm, bool clear_int, u16 num_popped,
			 u32 eq_delay_mult_enc)
{
	u32 val = 0;

	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	/* this doorbell always targets an event queue */
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}
240
/* Ring the CQ doorbell for queue @qid: optionally re-arm the CQ (@arm)
 * and acknowledge @num_popped processed completion entries.  Skipped once
 * a h/w error has been detected.
 */
void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;

	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
		DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
257
/* ndo_set_mac_address handler.
 * Validates the new address, programs it into the f/w (when the interface
 * is running), verifies the change actually took effect by reading back
 * the active MAC, and only then updates netdev->dev_addr.
 * Returns 0 on success or a negative errno.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if, User provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* if device is not running, copy MAC to netdev->dev_addr */
	if (!netif_running(netdev))
		goto done;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or PF didn't pre-provision.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}
done:
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
322
Sathya Perlaca34fe32012-11-06 17:48:56 +0000323/* BE2 supports only v0 cmd */
324static void *hw_stats_from_cmd(struct be_adapter *adapter)
325{
326 if (BE2_chip(adapter)) {
327 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
328
329 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500330 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000331 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
332
333 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else {
335 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
336
337 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000338 }
339}
340
341/* BE2 supports only v0 cmd */
342static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
343{
344 if (BE2_chip(adapter)) {
345 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
346
347 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500348 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000349 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
350
351 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500352 } else {
353 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
354
355 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000356 }
357}
358
/* Copy the v0-layout (BE2) f/w stats into adapter->drv_stats after
 * byte-swapping the response from little-endian to CPU order.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and vlan filter drops separately; fold both
	 * into the single driver counter
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber events are tracked per physical port in the rxf stats */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
407
/* Copy the v1-layout (BE3) f/w stats into adapter->drv_stats after
 * byte-swapping the response from little-endian to CPU order.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	/* unlike v0, v1 reports a single combined filter-drop counter */
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
453
/* Copy the v2-layout (chips newer than BE3) f/w stats into
 * adapter->drv_stats after byte-swapping the response from little-endian
 * to CPU order.  Also copies the RoCE counters when RoCE is supported.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
507
/* Copy the Lancer pport f/w stats into adapter->drv_stats after
 * byte-swapping the response from little-endian to CPU order.
 * Lancer reports many counters as 64-bit pairs; only the low 32 bits
 * (the *_lo fields) are folded into the 32-bit driver counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* fold both address and vlan filter drops into one driver counter */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000544
Sathya Perla09c1c682011-08-22 19:41:53 +0000545static void accumulate_16bit_val(u32 *acc, u16 val)
546{
547#define lo(x) (x & 0xFFFF)
548#define hi(x) (x & 0xFFFF0000)
549 bool wrapped = val < lo(*acc);
550 u32 newacc = hi(*acc) + val;
551
552 if (wrapped)
553 newacc += 65536;
554 ACCESS_ONCE(*acc) = newacc;
555}
556
Jingoo Han4188e7d2013-08-05 18:02:02 +0900557static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530558 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000559{
560 if (!BEx_chip(adapter))
561 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
562 else
563 /* below erx HW counter can actually wrap around after
564 * 65535. Driver accumulates a 32-bit value
565 */
566 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
567 (u16)erx_stat);
568}
569
/* Parse the f/w stats response into adapter->drv_stats, dispatching to
 * the populate_* helper that matches this chip's stats layout version,
 * and update the per-RX-queue erx drop counters.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
595
/* ndo_get_stats64 handler: aggregates SW-maintained per-RX/TX-queue counters
 * and FW-reported error counters (adapter->drv_stats) into @stats.
 * Per-queue 64-bit counters are sampled inside a u64_stats retry loop so a
 * consistent snapshot is read even on 32-bit hosts where the update is not
 * atomic.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry until a consistent pkts/bytes pair is read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
663
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000664void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700665{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700666 struct net_device *netdev = adapter->netdev;
667
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000668 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000669 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000670 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700671 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000672
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530673 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000674 netif_carrier_on(netdev);
675 else
676 netif_carrier_off(netdev);
Ivan Vecera18824892015-04-30 11:59:49 +0200677
678 netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500681static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700682{
Sathya Perla3c8def92011-06-12 20:01:58 +0000683 struct be_tx_stats *stats = tx_stats(txo);
684
Sathya Perlaab1594e2011-07-25 19:10:15 +0000685 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000686 stats->tx_reqs++;
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500687 stats->tx_bytes += skb->len;
688 stats->tx_pkts += (skb_shinfo(skb)->gso_segs ? : 1);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000689 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690}
691
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500692/* Returns number of WRBs needed for the skb */
693static u32 skb_wrb_cnt(struct sk_buff *skb)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700694{
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500695 /* +1 for the header wrb */
696 return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697}
698
699static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
700{
Sathya Perlaf986afc2015-02-06 08:18:43 -0500701 wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
702 wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
703 wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
704 wrb->rsvd0 = 0;
705}
706
707/* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
708 * to avoid the swap and shift/mask operations in wrb_fill().
709 */
710static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
711{
712 wrb->frag_pa_hi = 0;
713 wrb->frag_pa_lo = 0;
714 wrb->frag_len = 0;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000715 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700716}
717
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000718static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530719 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000720{
721 u8 vlan_prio;
722 u16 vlan_tag;
723
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100724 vlan_tag = skb_vlan_tag_get(skb);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000725 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
726 /* If vlan priority provided by OS is NOT in available bmap */
727 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
728 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
729 adapter->recommended_prio;
730
731 return vlan_tag;
732}
733
Sathya Perlac9c47142014-03-27 10:46:19 +0530734/* Used only for IP tunnel packets */
735static u16 skb_inner_ip_proto(struct sk_buff *skb)
736{
737 return (inner_ip_hdr(skb)->version == 4) ?
738 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
739}
740
741static u16 skb_ip_proto(struct sk_buff *skb)
742{
743 return (ip_hdr(skb)->version == 4) ?
744 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
745}
746
Sriharsha Basavapatnacf5671e2015-02-16 08:03:48 +0530747static inline bool be_is_txq_full(struct be_tx_obj *txo)
748{
749 return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
750}
751
752static inline bool be_can_txq_wake(struct be_tx_obj *txo)
753{
754 return atomic_read(&txo->q.used) < txo->q.len / 2;
755}
756
757static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
758{
759 return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
760}
761
/* Translate the skb's offload requirements (LSO, checksum offload, VLAN)
 * into the feature bits and fields of @wrb_params, which wrb_fill_hdr()
 * later encodes into the TX header WRB.
 */
static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
				       struct sk_buff *skb,
				       struct be_wrb_params *wrb_params)
{
	u16 proto;

	if (skb_is_gso(skb)) {
		BE_WRB_F_SET(wrb_params->features, LSO, 1);
		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
		/* Lancer does not need the explicit LSO6 bit */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* For tunnelled pkts the L4 proto comes from the inner
		 * header; also request inner-IP checksum offload.
		 */
		if (skb->encapsulation) {
			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
		else if (proto == IPPROTO_UDP)
			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
	}

	if (skb_vlan_tag_present(skb)) {
		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
	}

	/* Ethernet CRC insertion is always requested */
	BE_WRB_F_SET(wrb_params->features, CRC, 1);
}
Sathya Perla5f07b3c2015-01-05 05:48:34 -0500793
/* Encode @wrb_params and skb geometry into the TX header WRB. The caller
 * is responsible for the final CPU-to-LE conversion of the whole header
 * (see be_tx_setup_wrb_hdr()).
 */
static void wrb_fill_hdr(struct be_adapter *adapter,
			 struct be_eth_hdr_wrb *hdr,
			 struct be_wrb_params *wrb_params,
			 struct sk_buff *skb)
{
	memset(hdr, 0, sizeof(*hdr));

	/* checksum-offload request bits */
	SET_TX_WRB_HDR_BITS(crc, hdr,
			    BE_WRB_F_GET(wrb_params->features, CRC));
	SET_TX_WRB_HDR_BITS(ipcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, IPCS));
	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, TCPCS));
	SET_TX_WRB_HDR_BITS(udpcs, hdr,
			    BE_WRB_F_GET(wrb_params->features, UDPCS));

	/* large-send offload bits and MSS */
	SET_TX_WRB_HDR_BITS(lso, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO));
	SET_TX_WRB_HDR_BITS(lso6, hdr,
			    BE_WRB_F_GET(wrb_params->features, LSO6));
	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);

	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
	 * hack is not needed, the evt bit is set while ringing DB.
	 */
	SET_TX_WRB_HDR_BITS(event, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
	SET_TX_WRB_HDR_BITS(vlan, hdr,
			    BE_WRB_F_GET(wrb_params->features, VLAN));
	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);

	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
	/* mgmt = 1 routes a copy of the pkt to the BMC (OS2BMC) */
	SET_TX_WRB_HDR_BITS(mgmt, hdr,
			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
}
830
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000831static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530832 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000833{
834 dma_addr_t dma;
Sathya Perlaf986afc2015-02-06 08:18:43 -0500835 u32 frag_len = le32_to_cpu(wrb->frag_len);
Sathya Perla7101e112010-03-22 20:41:12 +0000836
Sathya Perla7101e112010-03-22 20:41:12 +0000837
Sathya Perlaf986afc2015-02-06 08:18:43 -0500838 dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
839 (u64)le32_to_cpu(wrb->frag_pa_lo);
840 if (frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000841 if (unmap_single)
Sathya Perlaf986afc2015-02-06 08:18:43 -0500842 dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000843 else
Sathya Perlaf986afc2015-02-06 08:18:43 -0500844 dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000845 }
846}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700847
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530848/* Grab a WRB header for xmit */
849static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850{
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530851 u16 head = txo->q.head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530853 queue_head_inc(&txo->q);
854 return head;
855}
856
/* Set up the WRB header for xmit: encode it, convert it to LE for the HW,
 * remember the skb for completion processing, and account the packet's
 * WRBs against the queue.
 */
static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
				struct be_tx_obj *txo,
				struct be_wrb_params *wrb_params,
				struct sk_buff *skb, u16 head)
{
	u32 num_frags = skb_wrb_cnt(skb);
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);

	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	/* the slot must be free; a stale skb here means a bookkeeping bug */
	BUG_ON(txo->sent_skb_list[head]);
	txo->sent_skb_list[head] = skb;
	txo->last_req_hdr = head;
	atomic_add(num_frags, &txq->used);
	txo->last_req_wrb_cnt = num_frags;
	/* pend_wrb_cnt counts WRBs queued but not yet notified to the HW */
	txo->pend_wrb_cnt += num_frags;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700877
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530878/* Setup a WRB fragment (buffer descriptor) for xmit */
879static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
880 int len)
881{
882 struct be_eth_wrb *wrb;
883 struct be_queue_info *txq = &txo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700884
Sriharsha Basavapatna79a0d7d2015-02-16 08:03:46 +0530885 wrb = queue_head_node(txq);
886 wrb_fill(wrb, busaddr, len);
887 queue_head_inc(txq);
888}
889
/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
 * was invoked. The producer index is restored to the previous packet and the
 * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
 */
static void be_xmit_restore(struct be_adapter *adapter,
			    struct be_tx_obj *txo, u16 head, bool map_single,
			    u32 copied)
{
	struct device *dev;
	struct be_eth_wrb *wrb;
	struct be_queue_info *txq = &txo->q;

	dev = &adapter->pdev->dev;
	/* rewind head to the packet's header WRB so we can walk its frags */
	txq->head = head;

	/* skip the first wrb (hdr); it's not mapped */
	queue_head_inc(txq);
	/* walk forward unmapping each fragment until all mapped bytes
	 * (@copied) are accounted for; only the first mapping may be a
	 * dma_map_single() of the linear data
	 */
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= le32_to_cpu(wrb->frag_len);
		queue_head_inc(txq);
	}

	/* finally restore the producer index itself */
	txq->head = head;
}
917
/* Enqueue the given packet for transmit. This routine allocates WRBs for the
 * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
 * of WRBs used up by the packet, or 0 on DMA-mapping failure (in which case
 * the queue state is rolled back and the caller must free the skb).
 */
static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
			   struct sk_buff *skb,
			   struct be_wrb_params *wrb_params)
{
	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
	struct device *dev = &adapter->pdev->dev;
	struct be_queue_info *txq = &txo->q;
	bool map_single = false;
	u16 head = txq->head;
	dma_addr_t busaddr;
	int len;

	/* reserve the header WRB slot first; it is filled last, in
	 * be_tx_setup_wrb_hdr(), once all frag lengths are known
	 */
	head = be_tx_get_wrb_hdr(txo);

	if (skb->len > skb->data_len) {
		len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = skb_frag_size(frag);

		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		be_tx_setup_wrb_frag(txo, busaddr, len);
		copied += len;
	}

	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);

	be_tx_stats_update(txo, skb);
	return wrb_cnt;

dma_err:
	/* unmap everything mapped so far and restore the producer index */
	adapter->drv_stats.dma_map_errors++;
	be_xmit_restore(adapter, txo, head, map_single, copied);
	return 0;
}
968
Sathya Perlaf7062ee2015-02-06 08:18:35 -0500969static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
970{
971 return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
972}
973
Somnath Kotur93040ae2012-06-26 22:32:10 +0000974static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000975 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530976 struct be_wrb_params
977 *wrb_params)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000978{
979 u16 vlan_tag = 0;
980
981 skb = skb_share_check(skb, GFP_ATOMIC);
982 if (unlikely(!skb))
983 return skb;
984
Jiri Pirkodf8a39d2015-01-13 17:13:44 +0100985 if (skb_vlan_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000986 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530987
988 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
989 if (!vlan_tag)
990 vlan_tag = adapter->pvid;
991 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
992 * skip VLAN insertion
993 */
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +0530994 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530995 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000996
997 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100998 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
999 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001000 if (unlikely(!skb))
1001 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001002 skb->vlan_tci = 0;
1003 }
1004
1005 /* Insert the outer VLAN, if any */
1006 if (adapter->qnq_vid) {
1007 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +01001008 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1009 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001010 if (unlikely(!skb))
1011 return skb;
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301012 BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001013 }
1014
Somnath Kotur93040ae2012-06-26 22:32:10 +00001015 return skb;
1016}
1017
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001018static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1019{
1020 struct ethhdr *eh = (struct ethhdr *)skb->data;
1021 u16 offset = ETH_HLEN;
1022
1023 if (eh->h_proto == htons(ETH_P_IPV6)) {
1024 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1025
1026 offset += sizeof(struct ipv6hdr);
1027 if (ip6h->nexthdr != NEXTHDR_TCP &&
1028 ip6h->nexthdr != NEXTHDR_UDP) {
1029 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +05301030 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001031
1032 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1033 if (ehdr->hdrlen == 0xff)
1034 return true;
1035 }
1036 }
1037 return false;
1038}
1039
1040static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1041{
Jiri Pirkodf8a39d2015-01-13 17:13:44 +01001042 return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001043}
1044
Sathya Perla748b5392014-05-09 13:29:13 +05301045static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001046{
Sathya Perlaee9c7992013-05-22 23:04:55 +00001047 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001048}
1049
/* Apply BEx/Lancer-specific TX workarounds to @skb. Returns the (possibly
 * reallocated) skb, or NULL if the pkt was dropped or an allocation failed;
 * on the drop path the skb is freed here.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  struct be_wrb_params
						  *wrb_params)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Workaround: trim the pad so the frame ends exactly at
	 * eth-hdr + IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    skb_vlan_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1118
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301119static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1120 struct sk_buff *skb,
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301121 struct be_wrb_params *wrb_params)
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301122{
1123 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1124 * less may cause a transmit stall on that port. So the work-around is
1125 * to pad short packets (<= 32 bytes) to a 36-byte length.
1126 */
1127 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001128 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301129 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301130 }
1131
1132 if (BEx_chip(adapter) || lancer_chip(adapter)) {
Sriharsha Basavapatna804abcd2015-02-16 08:03:45 +05301133 skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301134 if (!skb)
1135 return NULL;
1136 }
1137
1138 return skb;
1139}
1140
/* Notify the HW of all pending (not yet notified) WRBs on @txo. Ensures the
 * last request raises a completion event, and pads the batch with a dummy
 * WRB on chips that require an even WRB count per doorbell.
 */
static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);

	/* Mark the last request eventable if it hasn't been marked already */
	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);

	/* compose a dummy wrb if there are odd set of wrbs to notify */
	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
		wrb_fill_dummy(queue_head_node(txq));
		queue_head_inc(txq);
		atomic_inc(&txq->used);
		txo->pend_wrb_cnt++;
		/* fold the dummy WRB into the last request's num_wrb count */
		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
					   TX_HDR_WRB_NUM_SHIFT);
		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
					  TX_HDR_WRB_NUM_SHIFT);
	}
	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
	txo->pend_wrb_cnt = 0;
}
1164
/* OS2BMC related */

/* Well-known ports whose broadcast/multicast traffic the BMC may want */
#define DHCP_CLIENT_PORT	68
#define DHCP_SERVER_PORT	67
#define NET_BIOS_PORT1		137
#define NET_BIOS_PORT2		138
#define DHCPV6_RAS_PORT		547

/* non-broadcast multicast is forwarded to the BMC only when its
 * multicast filter is NOT enabled (i.e. it accepts all multicasts)
 */
#define is_mc_allowed_on_bmc(adapter, eh)	\
	(!is_multicast_filt_enabled(adapter) &&	\
	 is_multicast_ether_addr(eh->h_dest) &&	\
	 !is_broadcast_ether_addr(eh->h_dest))

/* broadcast is forwarded only when the BMC's broadcast filter is off */
#define is_bc_allowed_on_bmc(adapter, eh)	\
	(!is_broadcast_filt_enabled(adapter) &&	\
	 is_broadcast_ether_addr(eh->h_dest))

#define is_arp_allowed_on_bmc(adapter, skb)	\
	(is_arp(skb) && is_arp_filt_enabled(adapter))

/* NOTE(review): compare_ether_addr() is the deprecated spelling of
 * !ether_addr_equal(); it returns 0 on a match, hence the negation.
 */
#define is_broadcast_packet(eh, adapter)	\
		(is_multicast_ether_addr(eh->h_dest) && \
		!compare_ether_addr(eh->h_dest, adapter->netdev->broadcast))

#define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))

/* per-filter-bit accessors over the FW-reported adapter->bmc_filt_mask */
#define is_arp_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))

#define is_dhcp_client_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)

#define is_dhcp_srvr_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)

#define is_nbios_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)

#define is_ipv6_na_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask &	\
			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)

#define is_ipv6_ra_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)

#define is_ipv6_ras_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)

#define is_broadcast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)

#define is_multicast_filt_enabled(adapter)	\
		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1218
1219static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1220 struct sk_buff **skb)
1221{
1222 struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1223 bool os2bmc = false;
1224
1225 if (!be_is_os2bmc_enabled(adapter))
1226 goto done;
1227
1228 if (!is_multicast_ether_addr(eh->h_dest))
1229 goto done;
1230
1231 if (is_mc_allowed_on_bmc(adapter, eh) ||
1232 is_bc_allowed_on_bmc(adapter, eh) ||
1233 is_arp_allowed_on_bmc(adapter, (*skb))) {
1234 os2bmc = true;
1235 goto done;
1236 }
1237
1238 if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1239 struct ipv6hdr *hdr = ipv6_hdr((*skb));
1240 u8 nexthdr = hdr->nexthdr;
1241
1242 if (nexthdr == IPPROTO_ICMPV6) {
1243 struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1244
1245 switch (icmp6->icmp6_type) {
1246 case NDISC_ROUTER_ADVERTISEMENT:
1247 os2bmc = is_ipv6_ra_filt_enabled(adapter);
1248 goto done;
1249 case NDISC_NEIGHBOUR_ADVERTISEMENT:
1250 os2bmc = is_ipv6_na_filt_enabled(adapter);
1251 goto done;
1252 default:
1253 break;
1254 }
1255 }
1256 }
1257
1258 if (is_udp_pkt((*skb))) {
1259 struct udphdr *udp = udp_hdr((*skb));
1260
1261 switch (udp->dest) {
1262 case DHCP_CLIENT_PORT:
1263 os2bmc = is_dhcp_client_filt_enabled(adapter);
1264 goto done;
1265 case DHCP_SERVER_PORT:
1266 os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1267 goto done;
1268 case NET_BIOS_PORT1:
1269 case NET_BIOS_PORT2:
1270 os2bmc = is_nbios_filt_enabled(adapter);
1271 goto done;
1272 case DHCPV6_RAS_PORT:
1273 os2bmc = is_ipv6_ras_filt_enabled(adapter);
1274 goto done;
1275 default:
1276 break;
1277 }
1278 }
1279done:
1280 /* For packets over a vlan, which are destined
1281 * to BMC, asic expects the vlan to be inline in the packet.
1282 */
1283 if (os2bmc)
1284 *skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1285
1286 return os2bmc;
1287}
1288
/* ndo_start_xmit handler: applies chip workarounds, builds WRBs for the skb
 * and rings the TX doorbell (possibly batching via skb->xmit_more). Always
 * returns NETDEV_TX_OK; on errors the skb is dropped and counted in
 * tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	u16 q_idx = skb_get_queue_mapping(skb);
	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
	struct be_wrb_params wrb_params = { 0 };
	/* defer the doorbell while the stack promises more pkts */
	bool flush = !skb->xmit_more;
	u16 wrb_cnt;

	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
	if (unlikely(!skb))
		goto drop;

	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);

	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
	if (unlikely(!wrb_cnt)) {
		/* DMA mapping failed; queue state was already rolled back */
		dev_kfree_skb_any(skb);
		goto drop;
	}

	/* if os2bmc is enabled and if the pkt is destined to bmc,
	 * enqueue the pkt a 2nd time with mgmt bit set.
	 * NOTE(review): skb_get() is called only after the 2nd enqueue
	 * succeeds; both enqueued copies reference the same skb.
	 */
	if (be_send_pkt_to_bmc(adapter, &skb)) {
		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
		if (unlikely(!wrb_cnt))
			goto drop;
		else
			skb_get(skb);
	}

	if (be_is_txq_full(txo)) {
		netif_stop_subqueue(netdev, q_idx);
		tx_stats(txo)->tx_stops++;
	}

	/* ring the doorbell now if batching ended or the queue just filled */
	if (flush || __netif_subqueue_stopped(netdev, q_idx))
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
drop:
	tx_stats(txo)->tx_drv_drops++;
	/* Flush the already enqueued tx requests */
	if (flush && txo->pend_wrb_cnt)
		be_xmit_flush(adapter, txo);

	return NETDEV_TX_OK;
}
1339
1340static int be_change_mtu(struct net_device *netdev, int new_mtu)
1341{
1342 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301343 struct device *dev = &adapter->pdev->dev;
1344
1345 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1346 dev_info(dev, "MTU must be between %d and %d bytes\n",
1347 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001348 return -EINVAL;
1349 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301350
1351 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301352 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001353 netdev->mtu = new_mtu;
1354 return 0;
1355}
1356
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05001357static inline bool be_in_all_promisc(struct be_adapter *adapter)
1358{
1359 return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1360 BE_IF_FLAGS_ALL_PROMISCUOUS;
1361}
1362
1363static int be_set_vlan_promisc(struct be_adapter *adapter)
1364{
1365 struct device *dev = &adapter->pdev->dev;
1366 int status;
1367
1368 if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1369 return 0;
1370
1371 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1372 if (!status) {
1373 dev_info(dev, "Enabled VLAN promiscuous mode\n");
1374 adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1375 } else {
1376 dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1377 }
1378 return status;
1379}
1380
1381static int be_clear_vlan_promisc(struct be_adapter *adapter)
1382{
1383 struct device *dev = &adapter->pdev->dev;
1384 int status;
1385
1386 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1387 if (!status) {
1388 dev_info(dev, "Disabling VLAN promiscuous mode\n");
1389 adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1390 }
1391 return status;
1392}
1393
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (be_in_all_promisc(adapter))
		return 0;

	/* More VLANs configured than HW filters available: fall back to
	 * VLAN-promiscuous mode instead of programming a partial table.
	 */
	if (adapter->vlans_added > be_max_vlans(adapter))
		return be_set_vlan_promisc(adapter);

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
	if (status) {
		dev_err(dev, "Setting HW VLAN filtering failed\n");
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
		    addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			return be_set_vlan_promisc(adapter);
	} else if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
		/* Filter table programmed fine; promiscuous mode from an
		 * earlier overflow is no longer needed.
		 */
		status = be_clear_vlan_promisc(adapter);
	}
	return status;
}
1429
Patrick McHardy80d5c362013-04-19 02:04:28 +00001430static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001431{
1432 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001433 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001434
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001435 /* Packets with VID 0 are always received by Lancer by default */
1436 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301437 return status;
1438
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301439 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301440 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001441
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301442 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301443 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001444
Somnath Kotura6b74e02014-01-21 15:50:55 +05301445 status = be_vid_config(adapter);
1446 if (status) {
1447 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301448 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301449 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301450
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001451 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001452}
1453
Patrick McHardy80d5c362013-04-19 02:04:28 +00001454static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455{
1456 struct be_adapter *adapter = netdev_priv(netdev);
1457
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001458 /* Packets with VID 0 are always received by Lancer by default */
1459 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301460 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001461
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301462 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301463 adapter->vlans_added--;
1464
1465 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001466}
1467
/* Disable unconditional RX of all traffic and clear the tracking flags.
 * The firmware call's status is intentionally ignored (best effort).
 */
static void be_clear_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, OFF);
	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1473
/* Enable unconditional RX of all traffic and remember it in if_flags.
 * The firmware call's status is intentionally ignored (best effort).
 */
static void be_set_all_promisc(struct be_adapter *adapter)
{
	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
}
1479
1480static void be_set_mc_promisc(struct be_adapter *adapter)
1481{
1482 int status;
1483
1484 if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1485 return;
1486
1487 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1488 if (!status)
1489 adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1490}
1491
1492static void be_set_mc_list(struct be_adapter *adapter)
1493{
1494 int status;
1495
1496 status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1497 if (!status)
1498 adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1499 else
1500 be_set_mc_promisc(adapter);
1501}
1502
1503static void be_set_uc_list(struct be_adapter *adapter)
1504{
1505 struct netdev_hw_addr *ha;
1506 int i = 1; /* First slot is claimed by the Primary MAC */
1507
1508 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
1509 be_cmd_pmac_del(adapter, adapter->if_handle,
1510 adapter->pmac_id[i], 0);
1511
1512 if (netdev_uc_count(adapter->netdev) > be_max_uc(adapter)) {
1513 be_set_all_promisc(adapter);
1514 return;
1515 }
1516
1517 netdev_for_each_uc_addr(ha, adapter->netdev) {
1518 adapter->uc_macs++; /* First slot is for Primary MAC */
1519 be_cmd_pmac_add(adapter, (u8 *)ha->addr, adapter->if_handle,
1520 &adapter->pmac_id[adapter->uc_macs], 0);
1521 }
1522}
1523
1524static void be_clear_uc_list(struct be_adapter *adapter)
1525{
1526 int i;
1527
1528 for (i = 1; i < (adapter->uc_macs + 1); i++)
1529 be_cmd_pmac_del(adapter, adapter->if_handle,
1530 adapter->pmac_id[i], 0);
1531 adapter->uc_macs = 0;
Somnath kotur7ad09452014-03-03 14:24:43 +05301532}
1533
/* ndo_set_rx_mode handler: reconcile the HW RX filters with the netdev
 * flags and address lists.  Modes are applied in strict priority order:
 * full promiscuous, then multicast promiscuous, then exact UC/MC lists.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_set_all_promisc(adapter);
		/* Promiscuous mode subsumes every other filter */
		return;
	}

	/* Interface was previously in promiscuous mode; disable it */
	if (be_in_all_promisc(adapter)) {
		be_clear_all_promisc(adapter);
		/* Re-program the VLAN table that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter)) {
		be_set_mc_promisc(adapter);
		return;
	}

	/* Re-sync the UC table only when its size diverged from ours */
	if (netdev_uc_count(netdev) != adapter->uc_macs)
		be_set_uc_list(adapter);

	be_set_mc_list(adapter);
}
1562
/* ndo_set_vf_mac handler: program a new MAC address for VF @vf.
 * BEx chips require a delete+add of the pmac entry; later chips have a
 * single set-MAC firmware command.  Returns 0, -EPERM, -EINVAL, or a
 * translated firmware status.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx: replace the pmac entry; del status is ignored since
		 * the add below is what actually matters.
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the active MAC so repeated requests short-circuit above */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1602
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001603static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301604 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001605{
1606 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001607 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001608
Sathya Perla11ac75e2011-12-13 00:58:50 +00001609 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001610 return -EPERM;
1611
Sathya Perla11ac75e2011-12-13 00:58:50 +00001612 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001613 return -EINVAL;
1614
1615 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001616 vi->max_tx_rate = vf_cfg->tx_rate;
1617 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001618 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1619 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001620 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301621 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Kalesh APe7bcbd72015-05-06 05:30:32 -04001622 vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001623
1624 return 0;
1625}
1626
/* Enable Transparent VLAN Tagging (TVT) with @vlan on VF @vf: program the
 * hardware switch, wipe any VLAN filters the VF had programmed, and strip
 * the VF's privilege to program new ones.  Returns 0 or firmware status.
 */
static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	int vf_if_id = vf_cfg->if_handle;
	int status;

	/* Enable Transparent VLAN Tagging */
	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
	if (status)
		return status;

	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
	vids[0] = 0;
	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
	if (!status)
		dev_info(&adapter->pdev->dev,
			 "Cleared guest VLANs on VF%d", vf);

	/* After TVT is enabled, disallow VFs to program VLAN filters */
	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
						  ~BE_PRIV_FILTMGMT, vf + 1);
		if (!status)
			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
	}
	/* Filter-clear / privilege failures are not fatal: TVT is active */
	return 0;
}
1655
/* Undo Transparent VLAN Tagging on VF @vf and hand the VLAN-filter
 * programming privilege back to the VF.  Returns 0 or firmware status.
 */
static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
{
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Reset Transparent VLAN Tagging. */
	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
				       vf_cfg->if_handle, 0, 0);
	if (status)
		return status;

	/* Allow VFs to program VLAN filtering */
	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
						  BE_PRIV_FILTMGMT, vf + 1);
		if (!status) {
			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
		}
	}

	/* The guest must bounce its interface for the change to take hold */
	dev_info(dev,
		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
	return 0;
}
1682
Sathya Perla748b5392014-05-09 13:29:13 +05301683static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001684{
1685 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001686 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Vasundhara Volam435452a2015-03-20 06:28:23 -04001687 int status;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001688
Sathya Perla11ac75e2011-12-13 00:58:50 +00001689 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001690 return -EPERM;
1691
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001692 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001693 return -EINVAL;
1694
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001695 if (vlan || qos) {
1696 vlan |= qos << VLAN_PRIO_SHIFT;
Vasundhara Volam435452a2015-03-20 06:28:23 -04001697 status = be_set_vf_tvt(adapter, vf, vlan);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001698 } else {
Vasundhara Volam435452a2015-03-20 06:28:23 -04001699 status = be_clear_vf_tvt(adapter, vf);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001700 }
1701
Kalesh APabccf232014-07-17 16:20:24 +05301702 if (status) {
1703 dev_err(&adapter->pdev->dev,
Vasundhara Volam435452a2015-03-20 06:28:23 -04001704 "VLAN %d config on VF %d failed : %#x\n", vlan, vf,
1705 status);
Kalesh APabccf232014-07-17 16:20:24 +05301706 return be_cmd_status(status);
1707 }
1708
1709 vf_cfg->vlan_tag = vlan;
Kalesh APabccf232014-07-17 16:20:24 +05301710 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001711}
1712
/* ndo_set_vf_rate handler: apply a max TX rate cap to VF @vf.
 * min_tx_rate is not supported.  A zero max_tx_rate clears the cap.
 * Non-zero rates are validated against the current link speed (and, on
 * Skyhawk, must be a whole percentage of it) before being programmed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Only a max-rate cap is supported by this hardware */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0 means "no cap": skip all link-speed validation */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the cap so be_get_vf_config() can report it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301774
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301775static int be_set_vf_link_state(struct net_device *netdev, int vf,
1776 int link_state)
1777{
1778 struct be_adapter *adapter = netdev_priv(netdev);
1779 int status;
1780
1781 if (!sriov_enabled(adapter))
1782 return -EPERM;
1783
1784 if (vf >= adapter->num_vfs)
1785 return -EINVAL;
1786
1787 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301788 if (status) {
1789 dev_err(&adapter->pdev->dev,
1790 "Link state change on VF %d failed: %#x\n", vf, status);
1791 return be_cmd_status(status);
1792 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301793
Kalesh APabccf232014-07-17 16:20:24 +05301794 adapter->vf_cfg[vf].plink_tracking = link_state;
1795
1796 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301797}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001798
Kalesh APe7bcbd72015-05-06 05:30:32 -04001799static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
1800{
1801 struct be_adapter *adapter = netdev_priv(netdev);
1802 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1803 u8 spoofchk;
1804 int status;
1805
1806 if (!sriov_enabled(adapter))
1807 return -EPERM;
1808
1809 if (vf >= adapter->num_vfs)
1810 return -EINVAL;
1811
1812 if (BEx_chip(adapter))
1813 return -EOPNOTSUPP;
1814
1815 if (enable == vf_cfg->spoofchk)
1816 return 0;
1817
1818 spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
1819
1820 status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
1821 0, spoofchk);
1822 if (status) {
1823 dev_err(&adapter->pdev->dev,
1824 "Spoofchk change on VF %d failed: %#x\n", vf, status);
1825 return be_cmd_status(status);
1826 }
1827
1828 vf_cfg->spoofchk = enable;
1829 return 0;
1830}
1831
Sathya Perla2632baf2013-10-01 16:00:00 +05301832static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1833 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001834{
Sathya Perla2632baf2013-10-01 16:00:00 +05301835 aic->rx_pkts_prev = rx_pkts;
1836 aic->tx_reqs_prev = tx_pkts;
1837 aic->jiffies = now;
1838}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001839
/* Compute a new event-queue delay (EQD) for @eqo from the aggregate RX+TX
 * packet rate since the last sample, clamped to the user-configured
 * [min_eqd, max_eqd] window.  When adaptive coalescing is disabled the
 * ethtool-set static delay (et_eqd) is returned instead.
 */
static int be_get_new_eqd(struct be_eq_obj *eqo)
{
	struct be_adapter *adapter = eqo->adapter;
	int eqd, start;
	struct be_aic_obj *aic;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts = 0, tx_pkts = 0;
	ulong now;
	u32 pps, delta;
	int i;

	aic = &adapter->aic_obj[eqo->idx];
	if (!aic->enable) {
		/* Adaptive mode off: reset the sample clock and use the
		 * static ethtool-configured delay.
		 */
		if (aic->jiffies)
			aic->jiffies = 0;
		eqd = aic->et_eqd;
		return eqd;
	}

	/* Sum packet counts over all queues on this EQ; the u64_stats
	 * retry loops give a consistent read against concurrent writers.
	 */
	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts += rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
	}

	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts += txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
	}

	/* Skip, if wrapped around or first calculation */
	now = jiffies;
	if (!aic->jiffies || time_before(now, aic->jiffies) ||
	    rx_pkts < aic->rx_pkts_prev ||
	    tx_pkts < aic->tx_reqs_prev) {
		be_aic_update(aic, rx_pkts, tx_pkts, now);
		return aic->prev_eqd;
	}

	delta = jiffies_to_msecs(now - aic->jiffies);
	/* Too soon since the last sample to compute a rate */
	if (delta == 0)
		return aic->prev_eqd;

	/* Packets per second across RX and TX, then scale to an EQD */
	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
	      (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
	eqd = (pps / 15000) << 2;

	/* Very low rates get no coalescing delay at all */
	if (eqd < 8)
		eqd = 0;
	eqd = min_t(u32, eqd, aic->max_eqd);
	eqd = max_t(u32, eqd, aic->min_eqd);

	be_aic_update(aic, rx_pkts, tx_pkts, now);

	return eqd;
}
1900
1901/* For Skyhawk-R only */
1902static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
1903{
1904 struct be_adapter *adapter = eqo->adapter;
1905 struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
1906 ulong now = jiffies;
1907 int eqd;
1908 u32 mult_enc;
1909
1910 if (!aic->enable)
1911 return 0;
1912
1913 if (time_before_eq(now, aic->jiffies) ||
1914 jiffies_to_msecs(now - aic->jiffies) < 1)
1915 eqd = aic->prev_eqd;
1916 else
1917 eqd = be_get_new_eqd(eqo);
1918
1919 if (eqd > 100)
1920 mult_enc = R2I_DLY_ENC_1;
1921 else if (eqd > 60)
1922 mult_enc = R2I_DLY_ENC_2;
1923 else if (eqd > 20)
1924 mult_enc = R2I_DLY_ENC_3;
1925 else
1926 mult_enc = R2I_DLY_ENC_0;
1927
1928 aic->prev_eqd = eqd;
1929
1930 return mult_enc;
1931}
1932
1933void be_eqd_update(struct be_adapter *adapter, bool force_update)
1934{
1935 struct be_set_eqd set_eqd[MAX_EVT_QS];
1936 struct be_aic_obj *aic;
1937 struct be_eq_obj *eqo;
1938 int i, num = 0, eqd;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001939
Sathya Perla2632baf2013-10-01 16:00:00 +05301940 for_all_evt_queues(adapter, eqo, i) {
1941 aic = &adapter->aic_obj[eqo->idx];
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04001942 eqd = be_get_new_eqd(eqo);
1943 if (force_update || eqd != aic->prev_eqd) {
Sathya Perla2632baf2013-10-01 16:00:00 +05301944 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1945 set_eqd[num].eq_id = eqo->q.id;
1946 aic->prev_eqd = eqd;
1947 num++;
1948 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001949 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301950
1951 if (num)
1952 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001953}
1954
Sathya Perla3abcded2010-10-03 22:12:27 -07001955static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301956 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001957{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001958 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001959
Sathya Perlaab1594e2011-07-25 19:10:15 +00001960 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001961 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001962 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001963 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001964 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001965 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001966 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001967 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001968 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001969}
1970
Sathya Perla2e588f82011-03-11 02:49:26 +00001971static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001972{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001973 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301974 * Also ignore ipcksm for ipv6 pkts
1975 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001976 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301977 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001978}
1979
/* Pop the RX page-info entry at the queue tail and make its data visible
 * to the CPU: a full DMA unmap when this fragment is the last user of the
 * mapped page, otherwise just a cache sync of this fragment's region.
 * Advances the ring tail and decrements the used count.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment on the page: tear down the whole mapping */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* Page still shared with later fragments: sync only */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
2005
2006/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002007static void be_rx_compl_discard(struct be_rx_obj *rxo,
2008 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002009{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002010 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00002011 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002012
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002013 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05302014 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00002015 put_page(page_info->page);
2016 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002017 }
2018}
2019
2020/*
2021 * skb_fill_rx_data forms a complete skb for an ether frame
2022 * indicated by rxcp.
2023 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	/* Consume the first posted RX fragment for this completion */
	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Small enough to live entirely in the skb linear area */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Copy only the Ethernet header into the linear area; the
		 * rest of the first fragment is attached as frag[0] so the
		 * page is not copied.
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	/* Ownership of the page (if kept) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		/* Single-fragment packet: HW must have reported exactly one */
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as previous frag: drop the extra ref taken
			 * when the buffer was posted; frag[j] already holds it.
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
2094
/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb memory: drop the frame and recycle its RX buffers */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Attach the posted RX fragments for this completion to the skb */
	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust HW checksum only when RXCSUM is enabled and HW validated it */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the validated checksum is the inner one */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
2130
/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame and recycle its buffers */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	/* Build a frags-only skb: all payload stays in the posted pages.
	 * j starts at (u16)-1 so the first iteration's j++ yields slot 0.
	 */
	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: frag[j] already holds a reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken for HW-validated TCP; checksum is good */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* For tunneled packets the validated checksum is the inner one */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
2188
/* Decode a v1 (BE3-native) RX completion entry into the sw rxcp struct */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002211
/* Decode a v0 (legacy) RX completion entry into the sw rxcp struct.
 * Unlike v1, this format carries an ip_frag bit and no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* vlan fields are only meaningful when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
2233
/* Fetch and parse the next valid RX completion from rxo's CQ.
 * Returns NULL when no new completion is present. On success the CQ
 * entry is consumed (valid bit cleared, tail advanced) and a pointer to
 * the per-rxo decoded rxcp is returned.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read barrier: load the rest of the entry only after the valid
	 * bit is observed set (HW writes the entry before the valid bit).
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not computed by HW for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		/* BE chips report the tag byte-swapped; Lancer does not */
		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the PVID tag from the stack unless the vlan was
		 * explicitly configured on this interface
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
2278
Eric Dumazet1829b082011-03-01 05:48:12 +00002279static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002280{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002281 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00002282
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002283 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00002284 gfp |= __GFP_COMP;
2285 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286}
2287
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Stop early if the next slot still owns a page (ring full) */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page and DMA-map it once; the
			 * individual frags reuse this single mapping.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Carve the next frag out of the current page; each
			 * posted frag holds its own page reference.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Write the frag's bus address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_frag marks the frag that carries the mapping
			 * of the whole page, to be unmapped on completion
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Ring the doorbell in chunks the HW can accept per write */
		do {
			notify = min(MAX_NUM_POST_ERX_DB, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
2370
/* Fetch and decode the next valid TX completion from txo's CQ.
 * Returns NULL when no new completion is present; otherwise consumes
 * the CQ entry and returns the per-txo decoded txcp.
 */
static struct be_tx_compl_info *be_tx_compl_get(struct be_tx_obj *txo)
{
	struct be_queue_info *tx_cq = &txo->cq;
	struct be_tx_compl_info *txcp = &txo->txcp;
	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);

	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Ensure load ordering of valid bit dword and other dwords below */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	txcp->status = GET_TX_COMPL_BITS(status, compl);
	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);

	/* Clear the valid bit so this entry is not seen again */
	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
	queue_tail_inc(tx_cq);
	return txcp;
}
2391
/* Walk the TX queue from its tail up to and including last_index,
 * unmapping each WRB and freeing the skbs that were transmitted.
 * An skb's entry in sent_skb_list sits at its header-WRB slot; frag
 * WRBs have NULL there. Returns the number of WRBs reclaimed so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct be_queue_info *txq = &txo->q;
	u16 frag_index, num_wrbs = 0;
	struct sk_buff *skb = NULL;
	bool unmap_skb_hdr = false;
	struct be_eth_wrb *wrb;

	do {
		if (sent_skbs[txq->tail]) {
			/* Free skb from prev req */
			if (skb)
				dev_consume_skb_any(skb);
			skb = sent_skbs[txq->tail];
			sent_skbs[txq->tail] = NULL;
			queue_tail_inc(txq);	/* skip hdr wrb */
			num_wrbs++;
			/* The first data WRB after the header also unmaps
			 * the skb's linear (header) area, if any
			 */
			unmap_skb_hdr = true;
		}
		wrb = queue_tail_node(txq);
		frag_index = txq->tail;
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(skb)));
		unmap_skb_hdr = false;
		queue_tail_inc(txq);
		num_wrbs++;
	} while (frag_index != last_index);
	/* Free the skb belonging to the last processed request */
	dev_consume_skb_any(skb);

	return num_wrbs;
}
2425
/* Return the number of events in the event queue */
static inline int events_get(struct be_eq_obj *eqo)
{
	struct be_eq_entry *eqe;
	int num = 0;

	/* Drain EQ entries until a zero (not-yet-written) one is found;
	 * each consumed entry is zeroed so it can be reused by HW.
	 */
	do {
		eqe = queue_tail_node(&eqo->q);
		if (eqe->evt == 0)
			break;

		/* Read barrier before acting on the observed entry */
		rmb();
		eqe->evt = 0;
		num++;
		queue_tail_inc(&eqo->q);
	} while (true);

	return num;
}
2445
/* Leaves the EQ is disarmed state */
static void be_eq_clean(struct be_eq_obj *eqo)
{
	int num = events_get(eqo);

	/* Ack all pending events without re-arming the EQ */
	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
}
2453
/* Free posted rx buffers that were not used */
static void be_rxq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;

	/* Release the page reference held by every still-posted buffer */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	/* Reset ring indices for a clean restart */
	rxq->tail = 0;
	rxq->head = 0;
}
2469
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~50ms or when the HW is in error */
			if (flush_wait++ > 50 ||
			    be_check_error(adapter,
					   BE_ERROR_HW)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* Re-arm the CQ to prod HW to flush coalesced entries */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);
}
2509
/* Drain all TX completions from HW, then reclaim any TX requests that
 * were queued but never notified to HW. Called during queue teardown.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct device *dev = &adapter->pdev->dev;
	struct be_tx_compl_info *txcp;
	struct be_queue_info *txq;
	struct be_tx_obj *txo;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			/* Reclaim all completions currently in this TX CQ */
			while ((txcp = be_tx_compl_get(txo))) {
				num_wrbs +=
					be_tx_compl_process(adapter, txo,
							    txcp->end_index);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart silence timer */
				timeo = 0;
			}
			if (!be_is_tx_compl_pending(txo))
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 ||
		    be_check_error(adapter, BE_ERROR_HW))
			break;

		mdelay(1);
	} while (true);

	/* Free enqueued TX that was never notified to HW */
	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;

		if (atomic_read(&txq->used)) {
			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
				 i, atomic_read(&txq->used));
			notified_idx = txq->tail;
			end_idx = txq->tail;
			index_adv(&end_idx, atomic_read(&txq->used) - 1,
				  txq->len);
			/* Use the tx-compl process logic to handle requests
			 * that were not sent to the HW.
			 */
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
			BUG_ON(atomic_read(&txq->used));
			txo->pend_wrb_cnt = 0;
			/* Since hw was never notified of these requests,
			 * reset TXQ indices
			 */
			txq->head = notified_idx;
			txq->tail = notified_idx;
		}
	}
}
2575
/* Tear down all event queues: drain events, destroy the HW queue,
 * unregister NAPI and free per-EQ resources.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Freed even for EQs that were never created */
		free_cpumask_var(eqo->affinity_mask);
		be_queue_free(adapter, &eqo->q);
	}
}
2592
/* Create the event queues: allocate per-EQ memory, register NAPI and
 * issue the EQ-create command to FW for each queue.
 * Returns 0 on success or a negative errno; on failure the caller is
 * expected to clean up via be_evt_queues_destroy().
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	/* One EQ per available IRQ, capped by the configured queue count */
	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		int numa_node = dev_to_node(&adapter->pdev->dev);
		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
			return -ENOMEM;
		/* Spread IRQ affinity hints across CPUs local to the device */
		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
				eqo->affinity_mask);
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing defaults */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2630
/* Destroy the MCC queue and its CQ (queue first, then its CQ) */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
2645
2646/* Must be called only after TX qs are created as MCC shares TX EQ */
2647static int be_mcc_queues_create(struct be_adapter *adapter)
2648{
2649 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002650
Sathya Perla8788fdc2009-07-27 22:52:03 +00002651 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002652 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302653 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002654 goto err;
2655
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002656 /* Use the default EQ for MCC completions */
2657 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002658 goto mcc_cq_free;
2659
Sathya Perla8788fdc2009-07-27 22:52:03 +00002660 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002661 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2662 goto mcc_cq_destroy;
2663
Sathya Perla8788fdc2009-07-27 22:52:03 +00002664 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002665 goto mcc_q_free;
2666
2667 return 0;
2668
2669mcc_q_free:
2670 be_queue_free(adapter, q);
2671mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002672 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002673mcc_cq_free:
2674 be_queue_free(adapter, cq);
2675err:
2676 return -1;
2677}
2678
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002679static void be_tx_queues_destroy(struct be_adapter *adapter)
2680{
2681 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002682 struct be_tx_obj *txo;
2683 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002684
Sathya Perla3c8def92011-06-12 20:01:58 +00002685 for_all_tx_queues(adapter, txo, i) {
2686 q = &txo->q;
2687 if (q->created)
2688 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2689 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002690
Sathya Perla3c8def92011-06-12 20:01:58 +00002691 q = &txo->cq;
2692 if (q->created)
2693 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2694 be_queue_free(adapter, q);
2695 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002696}
2697
/* Allocates and creates the TX queues: one TXQ + TX-CQ pair per event
 * queue, capped by the profile's max TX queues. Each TX-CQ is bound to
 * an EQ; XPS is then set so the TXQ is serviced by the CPUs handling
 * that EQ. Returns 0 or the first failing status; caller unwinds via
 * be_tx_queues_destroy().
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq;
	struct be_tx_obj *txo;
	struct be_eq_obj *eqo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		/* CQ must exist before the TXQ that posts completions to it */
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;

		/* Steer TX from the CPUs that service this TXQ's EQ */
		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
				    eqo->idx);
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2742
2743static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002744{
2745 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002746 struct be_rx_obj *rxo;
2747 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002748
Sathya Perla3abcded2010-10-03 22:12:27 -07002749 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002750 q = &rxo->cq;
2751 if (q->created)
2752 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2753 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002754 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002755}
2756
/* Decides the RX queue counts and creates one completion queue per RXQ.
 * Policy: as many RSS rings as there are EQs; RSS is used only when at
 * least 2 rings are possible; a default (non-RSS) RXQ is added when the
 * profile needs one; and at minimum a single RXQ always exists. CQs are
 * distributed round-robin over the EQs. Returns 0 or the first failing
 * status. (The RXQs themselves are created elsewhere.)
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rss_qs = adapter->num_evt_qs;

	/* We'll use RSS only if at least 2 RSS rings are supported. */
	if (adapter->num_rss_qs <= 1)
		adapter->num_rss_qs = 0;

	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;

	/* When the interface is not capable of RSS rings (and there is no
	 * need to create a default RXQ) we'll still need one RXQ
	 */
	if (adapter->num_rx_qs == 0)
		adapter->num_rx_qs = 1;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Spread the RX CQs round-robin across the EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RX queue(s)\n", adapter->num_rx_qs);
	return 0;
}
2798
/* INTx interrupt handler. Only the first EQ (eq_obj[0]) is used in INTx
 * mode (see be_irq_register()). Counts pending events, hands processing
 * to NAPI, and distinguishes genuine from spurious interrupts so the
 * kernel does not disable the line as a bad IRQ.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2830
/* MSI-x interrupt handler: one vector per EQ. Notifies the EQ without
 * re-arming it (it stays unarmed while NAPI runs; see the comment in
 * be_intx()) and defers all processing to NAPI context (be_poll()).
 */
static irqreturn_t be_msix(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;

	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
	napi_schedule(&eqo->napi);
	return IRQ_HANDLED;
}
2839
Sathya Perla2e588f82011-03-11 02:49:26 +00002840static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002841{
Somnath Koture38b1702013-05-29 22:55:56 +00002842 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002843}
2844
/* Drains up to @budget RX completions from rxo's CQ, dispatching each
 * either to the GRO path or the regular receive path. @polling tells
 * whether we were invoked from NAPI or from busy-poll (GRO is skipped
 * for busy-poll). Notifies the CQ for all processed entries and refills
 * the RXQ with fresh buffers unless the queue is in post_starved state.
 * Returns the number of completions processed (<= budget).
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;	/* total RX frags freed; sizes the refill */

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		/* stats are updated even for discarded/flush completions */
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2904
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302905static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302906{
2907 switch (status) {
2908 case BE_TX_COMP_HDR_PARSE_ERR:
2909 tx_stats(txo)->tx_hdr_parse_err++;
2910 break;
2911 case BE_TX_COMP_NDMA_ERR:
2912 tx_stats(txo)->tx_dma_err++;
2913 break;
2914 case BE_TX_COMP_ACL_ERR:
2915 tx_stats(txo)->tx_spoof_check_err++;
2916 break;
2917 }
2918}
2919
Sriharsha Basavapatna152ffe52015-02-16 08:03:47 +05302920static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
Kalesh AP512bb8a2014-09-02 09:56:49 +05302921{
2922 switch (status) {
2923 case LANCER_TX_COMP_LSO_ERR:
2924 tx_stats(txo)->tx_tso_err++;
2925 break;
2926 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2927 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2928 tx_stats(txo)->tx_spoof_check_err++;
2929 break;
2930 case LANCER_TX_COMP_QINQ_ERR:
2931 tx_stats(txo)->tx_qinq_err++;
2932 break;
2933 case LANCER_TX_COMP_PARITY_ERR:
2934 tx_stats(txo)->tx_internal_parity_err++;
2935 break;
2936 case LANCER_TX_COMP_DMA_ERR:
2937 tx_stats(txo)->tx_dma_err++;
2938 break;
2939 }
2940}
2941
/* Reaps all pending TX completions of @txo (queue index @idx): frees
 * the transmitted wrbs/skbs, records per-status error stats, notifies
 * the CQ, and re-wakes the netdev subqueue if it had been stopped for
 * lack of wrbs. Called from be_poll() for every TXQ on the EQ.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	int num_wrbs = 0, work_done = 0;
	struct be_tx_compl_info *txcp;

	while ((txcp = be_tx_compl_get(txo))) {
		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
		work_done++;

		/* non-zero status indicates a TX error; account it */
		if (txcp->status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, txcp->status);
			else
				be_update_tx_err(txo, txcp->status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    be_can_txq_wake(txo)) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002976
#ifdef CONFIG_NET_RX_BUSY_POLL
/* NAPI vs busy-poll arbitration for an EQ. eqo->state tracks who owns
 * the EQ (BE_EQ_IDLE / BE_EQ_NAPI / BE_EQ_POLL) plus *_YIELD bits noting
 * that the other side tried and backed off. The no-busy-poll stubs below
 * make NAPI always win and busy-poll always fail.
 */

/* Try to take EQ ownership for NAPI; returns false if busy-poll holds it */
static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock(&eqo->lock); /* BH is already disabled */
	if (eqo->state & BE_EQ_LOCKED) {
		/* NAPI must not already own the EQ if it is locked */
		WARN_ON(eqo->state & BE_EQ_NAPI);
		eqo->state |= BE_EQ_NAPI_YIELD;
		status = false;
	} else {
		eqo->state = BE_EQ_NAPI;
	}
	spin_unlock(&eqo->lock);
	return status;
}

/* Release NAPI ownership taken by be_lock_napi() */
static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
	spin_lock(&eqo->lock); /* BH is already disabled */

	WARN_ON(eqo->state & (BE_EQ_POLL | BE_EQ_NAPI_YIELD));
	eqo->state = BE_EQ_IDLE;

	spin_unlock(&eqo->lock);
}

/* Try to take EQ ownership for busy-poll; returns false if NAPI holds it */
static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	bool status = true;

	spin_lock_bh(&eqo->lock);
	if (eqo->state & BE_EQ_LOCKED) {
		eqo->state |= BE_EQ_POLL_YIELD;
		status = false;
	} else {
		eqo->state |= BE_EQ_POLL;
	}
	spin_unlock_bh(&eqo->lock);
	return status;
}

/* Release busy-poll ownership taken by be_lock_busy_poll() */
static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_bh(&eqo->lock);

	WARN_ON(eqo->state & (BE_EQ_NAPI));
	eqo->state = BE_EQ_IDLE;

	spin_unlock_bh(&eqo->lock);
}

/* Initialize the per-EQ lock/state before the EQ is used */
static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
	spin_lock_init(&eqo->lock);
	eqo->state = BE_EQ_IDLE;
}

/* Block until any in-flight busy-poll on this EQ has drained */
static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
	local_bh_disable();

	/* It's enough to just acquire napi lock on the eqo to stop
	 * be_busy_poll() from processing any queues.
	 */
	while (!be_lock_napi(eqo))
		mdelay(1);

	local_bh_enable();
}

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline bool be_lock_napi(struct be_eq_obj *eqo)
{
	return true;
}

static inline void be_unlock_napi(struct be_eq_obj *eqo)
{
}

static inline bool be_lock_busy_poll(struct be_eq_obj *eqo)
{
	return false;
}

static inline void be_unlock_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_enable_busy_poll(struct be_eq_obj *eqo)
{
}

static inline void be_disable_busy_poll(struct be_eq_obj *eqo)
{
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
3076
/* NAPI poll handler for one EQ. Reaps TX completions for every TXQ on
 * the EQ, processes RX (unless busy-poll owns the EQ, in which case the
 * full budget is reported to stay in polling mode), and services MCC
 * completions on the MCC EQ. When work done is below budget, NAPI is
 * completed and the EQ is re-armed (with a delay-multiplier encoding on
 * Skyhawk); otherwise events are only counted/cleared so polling
 * continues. Returns the RX work done (capped at budget).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u32 mult_enc = 0;

	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the EQ: claim the whole budget so NAPI
		 * gets rescheduled instead of completing
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);

		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
		 * delay via a delay multiplier encoding value
		 */
		if (skyhawk_chip(adapter))
			mult_enc = be_get_eq_delay_mult_enc(eqo);

		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
			     mult_enc);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
	}
	return max_work;
}
3125
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency busy-poll handler for one EQ. Returns LL_FLUSH_BUSY when
 * NAPI currently owns the EQ; otherwise polls each RXQ on the EQ with a
 * small budget (4) and returns the work done by the first RXQ that had
 * any completions.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
3147
/* Checks the adapter's error registers and latches an error state.
 * On Lancer, reads the SLIPORT status/error registers and sets
 * BE_ERROR_UE unless the error signature indicates an in-progress FW
 * update. On BE/Skyhawk, reads the masked UE (unrecoverable error)
 * status registers and logs every set bit; the error state is latched
 * only on Skyhawk because older BE parts can report spurious UEs.
 * Does nothing if an HW error has already been recorded.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	struct device *dev = &adapter->pdev->dev;

	if (be_check_error(adapter, BE_ERROR_HW))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			be_set_error(adapter, BE_ERROR_UE);
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
		ue_lo_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_LOW_MASK);
		ue_hi_mask = ioread32(adapter->pcicfg +
				      PCICFG_UE_STATUS_HI_MASK);

		/* only unmasked bits indicate real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				be_set_error(adapter, BE_ERROR_UE);

			/* log the name of every set UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
}
3216
Sathya Perla8d56ff12009-11-22 22:02:26 +00003217static void be_msix_disable(struct be_adapter *adapter)
3218{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003219 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00003220 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00003221 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303222 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07003223 }
3224}
3225
/* Enables MSI-x vectors for the adapter. Requests up to cfg_num_qs
 * vectors (doubled and CPU-capped when RoCE is supported, since RoCE
 * gets half of whatever is granted) and records how many went to the
 * NIC vs RoCE. Returns 0 on success; on failure returns the error only
 * for VFs (which cannot fall back to INTx) and 0 otherwise so the PF
 * probe can continue with INTx.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* may grant fewer vectors than asked, but at least MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (be_virtfn(adapter))
		return num_vec;
	return 0;
}
3269
/* Returns the Linux IRQ vector backing the given EQ's MSI-x entry */
static inline int be_msix_vec_get(struct be_adapter *adapter,
				  struct be_eq_obj *eqo)
{
	return adapter->msix_entries[eqo->msix_idx].vector;
}
3275
/* Requests one MSI-x IRQ per event queue (named "<netdev>-q<i>") and
 * sets its CPU affinity hint to the EQ's affinity mask. On failure,
 * walks back over the already-registered EQs to free their IRQs and
 * disables MSI-x entirely before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;

		irq_set_affinity_hint(vec, eqo->affinity_mask);
	}

	return 0;
err_msix:
	/* unwind: free IRQs of the EQs registered so far (i-1 .. 0) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
3301
/* Registers the adapter's interrupt handler(s): MSI-x when enabled,
 * falling back to a shared INTx line (PFs only — VFs have no INTx) on
 * the first EQ. Sets isr_registered on success so be_irq_unregister()
 * knows there is something to tear down. Returns 0 or the request_irq
 * error.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (be_virtfn(adapter))
			return status;
	}

	/* INTx: only the first EQ is used */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			     &adapter->eq_obj[0]);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
3329
/* Undoes be_irq_register(): frees the INTx line or, for MSI-x, clears
 * each vector's affinity hint and frees its IRQ. No-op when no handler
 * was registered.
 */
static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int i, vec;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, &adapter->eq_obj[0]);
		goto done;
	}

	/* MSIx */
	for_all_evt_queues(adapter, eqo, i) {
		vec = be_msix_vec_get(adapter, eqo);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, eqo);
	}

done:
	adapter->isr_registered = false;
}
3355
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003356static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003357{
3358 struct be_queue_info *q;
3359 struct be_rx_obj *rxo;
3360 int i;
3361
3362 for_all_rx_queues(adapter, rxo, i) {
3363 q = &rxo->q;
3364 if (q->created) {
Kalesh AP99b44302015-08-05 03:27:49 -04003365 /* If RXQs are destroyed while in an "out of buffer"
3366 * state, there is a possibility of an HW stall on
3367 * Lancer. So, post 64 buffers to each queue to relieve
3368 * the "out of buffer" condition.
3369 * Make sure there's space in the RXQ before posting.
3370 */
3371 if (lancer_chip(adapter)) {
3372 be_rx_cq_clean(rxo);
3373 if (atomic_read(&q->used) == 0)
3374 be_post_rx_frags(rxo, GFP_KERNEL,
3375 MAX_RX_POST);
3376 }
3377
Sathya Perla482c9e72011-06-29 23:33:17 +00003378 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003379 be_rx_cq_clean(rxo);
Kalesh AP99b44302015-08-05 03:27:49 -04003380 be_rxq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00003381 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003382 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00003383 }
3384}
3385
Kalesh APbcc84142015-08-05 03:27:48 -04003386static void be_disable_if_filters(struct be_adapter *adapter)
3387{
3388 be_cmd_pmac_del(adapter, adapter->if_handle,
3389 adapter->pmac_id[0], 0);
3390
3391 be_clear_uc_list(adapter);
3392
3393 /* The IFACE flags are enabled in the open path and cleared
3394 * in the close path. When a VF gets detached from the host and
3395 * assigned to a VM the following happens:
3396 * - VF's IFACE flags get cleared in the detach path
3397 * - IFACE create is issued by the VF in the attach path
3398 * Due to a bug in the BE3/Skyhawk-R FW
3399 * (Lancer FW doesn't have the bug), the IFACE capability flags
3400 * specified along with the IFACE create cmd issued by a VF are not
3401 * honoured by FW. As a consequence, if a *new* driver
3402 * (that enables/disables IFACE flags in open/close)
3403 * is loaded in the host and an *old* driver is * used by a VM/VF,
3404 * the IFACE gets created *without* the needed flags.
3405 * To avoid this, disable RX-filter flags only for Lancer.
3406 */
3407 if (lancer_chip(adapter)) {
3408 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3409 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3410 }
3411}
3412
Sathya Perla889cd4b2010-05-30 23:33:45 +00003413static int be_close(struct net_device *netdev)
3414{
3415 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003416 struct be_eq_obj *eqo;
3417 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00003418
Kalesh APe1ad8e32014-04-14 16:12:41 +05303419 /* This protection is needed as be_close() may be called even when the
3420 * adapter is in cleared state (after eeh perm failure)
3421 */
3422 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3423 return 0;
3424
Kalesh APbcc84142015-08-05 03:27:48 -04003425 be_disable_if_filters(adapter);
3426
Parav Pandit045508a2012-03-26 14:27:13 +00003427 be_roce_dev_close(adapter);
3428
Ivan Veceradff345c52013-11-27 08:59:32 +01003429 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3430 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00003431 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303432 be_disable_busy_poll(eqo);
3433 }
David S. Miller71237b62013-11-28 18:53:36 -05003434 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00003435 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003436
3437 be_async_mcc_disable(adapter);
3438
3439 /* Wait for all pending tx completions to arrive so that
3440 * all tx skbs are freed.
3441 */
Sathya Perlafba87552013-05-08 02:05:50 +00003442 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05303443 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003444
3445 be_rx_qs_destroy(adapter);
Ajit Khaparded11a3472013-11-18 10:44:37 -06003446
Sathya Perlaa323d9b2012-12-17 19:38:50 +00003447 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003448 if (msix_enabled(adapter))
3449 synchronize_irq(be_msix_vec_get(adapter, eqo));
3450 else
3451 synchronize_irq(netdev->irq);
3452 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00003453 }
3454
Sathya Perla889cd4b2010-05-30 23:33:45 +00003455 be_irq_unregister(adapter);
3456
Sathya Perla482c9e72011-06-29 23:33:17 +00003457 return 0;
3458}
3459
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003460static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00003461{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003462 struct rss_info *rss = &adapter->rss_info;
3463 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00003464 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003465 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00003466
3467 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003468 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3469 sizeof(struct be_eth_rx_d));
3470 if (rc)
3471 return rc;
3472 }
3473
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003474 if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3475 rxo = default_rxo(adapter);
3476 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3477 rx_frag_size, adapter->if_handle,
3478 false, &rxo->rss_id);
3479 if (rc)
3480 return rc;
3481 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003482
3483 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00003484 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003485 rx_frag_size, adapter->if_handle,
3486 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00003487 if (rc)
3488 return rc;
3489 }
3490
3491 if (be_multi_rxq(adapter)) {
Vasundhara Volam71bb8bd2015-03-04 00:44:32 -05003492 for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003493 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303494 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003495 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05303496 rss->rsstable[j + i] = rxo->rss_id;
3497 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00003498 }
3499 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05303500 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3501 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00003502
3503 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05303504 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3505 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303506 } else {
3507 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05303508 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303509 }
Suresh Reddy594ad542013-04-25 23:03:20 +00003510
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003511 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05303512 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003513 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303514 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05303515 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05303516 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00003517 }
3518
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08003519 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05303520
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003521 /* Post 1 less than RXQ-len to avoid head being equal to tail,
3522 * which is a queue empty condition
3523 */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003524 for_all_rx_queues(adapter, rxo, i)
Suresh Reddyb02e60c2015-05-06 05:30:36 -04003525 be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3526
Sathya Perla889cd4b2010-05-30 23:33:45 +00003527 return 0;
3528}
3529
Kalesh APbcc84142015-08-05 03:27:48 -04003530static int be_enable_if_filters(struct be_adapter *adapter)
3531{
3532 int status;
3533
3534 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3535 if (status)
3536 return status;
3537
3538 /* For BE3 VFs, the PF programs the initial MAC address */
3539 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3540 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3541 adapter->if_handle,
3542 &adapter->pmac_id[0], 0);
3543 if (status)
3544 return status;
3545 }
3546
3547 if (adapter->vlans_added)
3548 be_vid_config(adapter);
3549
3550 be_set_rx_mode(adapter->netdev);
3551
3552 return 0;
3553}
3554
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003555static int be_open(struct net_device *netdev)
3556{
3557 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003558 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07003559 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003560 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003561 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07003562 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003563
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003564 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00003565 if (status)
3566 goto err;
3567
Kalesh APbcc84142015-08-05 03:27:48 -04003568 status = be_enable_if_filters(adapter);
3569 if (status)
3570 goto err;
3571
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003572 status = be_irq_register(adapter);
3573 if (status)
3574 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003575
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003576 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07003577 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003578
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003579 for_all_tx_queues(adapter, txo, i)
3580 be_cq_notify(adapter, txo->cq.id, true, 0);
3581
Sathya Perla7a1e9b22010-02-17 01:35:11 +00003582 be_async_mcc_enable(adapter);
3583
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003584 for_all_evt_queues(adapter, eqo, i) {
3585 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05303586 be_enable_busy_poll(eqo);
Padmanabh Ratnakar20947772015-05-06 05:30:33 -04003587 be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003588 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00003589 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003590
Sathya Perla323ff712012-09-28 04:39:43 +00003591 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00003592 if (!status)
3593 be_link_status_update(adapter, link_status);
3594
Sathya Perlafba87552013-05-08 02:05:50 +00003595 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00003596 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05303597
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303598#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303599 if (skyhawk_chip(adapter))
3600 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303601#endif
3602
Sathya Perla889cd4b2010-05-30 23:33:45 +00003603 return 0;
3604err:
3605 be_close(adapter->netdev);
3606 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00003607}
3608
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003609static int be_setup_wol(struct be_adapter *adapter, bool enable)
3610{
3611 struct be_dma_mem cmd;
3612 int status = 0;
3613 u8 mac[ETH_ALEN];
3614
Joe Perchesc7bf7162015-03-02 19:54:47 -08003615 eth_zero_addr(mac);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003616
3617 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07003618 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
3619 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05303620 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05303621 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003622
3623 if (enable) {
3624 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05303625 PCICFG_PM_CONTROL_OFFSET,
3626 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003627 if (status) {
3628 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00003629 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003630 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
3631 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003632 return status;
3633 }
3634 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303635 adapter->netdev->dev_addr,
3636 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003637 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3638 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3639 } else {
3640 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3641 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3642 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3643 }
3644
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003645 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003646 return status;
3647}
3648
Sathya Perlaf7062ee2015-02-06 08:18:35 -05003649static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3650{
3651 u32 addr;
3652
3653 addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3654
3655 mac[5] = (u8)(addr & 0xFF);
3656 mac[4] = (u8)((addr >> 8) & 0xFF);
3657 mac[3] = (u8)((addr >> 16) & 0xFF);
3658 /* Use the OUI from the current MAC address */
3659 memcpy(mac, adapter->netdev->dev_addr, 3);
3660}
3661
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003662/*
3663 * Generate a seed MAC address from the PF MAC Address using jhash.
3664 * MAC Address for VFs are assigned incrementally starting from the seed.
3665 * These addresses are programmed in the ASIC by the PF and the VF driver
3666 * queries for the MAC address during its probe.
3667 */
Sathya Perla4c876612013-02-03 20:30:11 +00003668static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003669{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003670 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003671 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003672 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003673 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003674
3675 be_vf_eth_addr_generate(adapter, mac);
3676
Sathya Perla11ac75e2011-12-13 00:58:50 +00003677 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303678 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003679 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003680 vf_cfg->if_handle,
3681 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303682 else
3683 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3684 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003685
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003686 if (status)
3687 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303688 "Mac address assignment failed for VF %d\n",
3689 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003690 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003691 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003692
3693 mac[5] += 1;
3694 }
3695 return status;
3696}
3697
Sathya Perla4c876612013-02-03 20:30:11 +00003698static int be_vfs_mac_query(struct be_adapter *adapter)
3699{
3700 int status, vf;
3701 u8 mac[ETH_ALEN];
3702 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003703
3704 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303705 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3706 mac, vf_cfg->if_handle,
3707 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003708 if (status)
3709 return status;
3710 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3711 }
3712 return 0;
3713}
3714
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003715static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003716{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003717 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003718 u32 vf;
3719
Sathya Perla257a3fe2013-06-14 15:54:51 +05303720 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003721 dev_warn(&adapter->pdev->dev,
3722 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003723 goto done;
3724 }
3725
Sathya Perlab4c1df92013-05-08 02:05:47 +00003726 pci_disable_sriov(adapter->pdev);
3727
Sathya Perla11ac75e2011-12-13 00:58:50 +00003728 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303729 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003730 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3731 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303732 else
3733 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3734 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003735
Sathya Perla11ac75e2011-12-13 00:58:50 +00003736 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3737 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003738done:
3739 kfree(adapter->vf_cfg);
3740 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303741 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003742}
3743
/* Destroy all HW queues: MCC, RX CQs, TX queues, then event queues. */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3751
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303752static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003753{
Sathya Perla191eb752012-02-23 18:50:13 +00003754 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3755 cancel_delayed_work_sync(&adapter->work);
3756 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3757 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303758}
3759
Sathya Perlaeb7dd462015-02-23 04:20:11 -05003760static void be_cancel_err_detection(struct be_adapter *adapter)
3761{
3762 if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3763 cancel_delayed_work_sync(&adapter->be_err_detection_work);
3764 adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3765 }
3766}
3767
#ifdef CONFIG_BE2NET_VXLAN
/* Revert all VxLAN offload state: convert the tunnel interface back to
 * normal mode, clear the FW's VxLAN port, and strip the tunnel-offload
 * feature bits from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303788
Vasundhara Volamf2858732015-03-04 00:44:33 -05003789static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
3790{
3791 struct be_resources res = adapter->pool_res;
3792 u16 num_vf_qs = 1;
3793
3794 /* Distribute the queue resources equally among the PF and it's VFs
3795 * Do not distribute queue resources in multi-channel configuration.
3796 */
3797 if (num_vfs && !be_is_mc(adapter)) {
3798 /* If number of VFs requested is 8 less than max supported,
3799 * assign 8 queue pairs to the PF and divide the remaining
3800 * resources evenly among the VFs
3801 */
3802 if (num_vfs < (be_max_vfs(adapter) - 8))
3803 num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
3804 else
3805 num_vf_qs = res.max_rss_qs / num_vfs;
3806
3807 /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
3808 * interfaces per port. Provide RSS on VFs, only if number
3809 * of VFs requested is less than MAX_RSS_IFACES limit.
3810 */
3811 if (num_vfs >= MAX_RSS_IFACES)
3812 num_vf_qs = 1;
3813 }
3814 return num_vf_qs;
3815}
3816
Somnath Koturb05004a2013-12-05 12:08:16 +05303817static int be_clear(struct be_adapter *adapter)
3818{
Vasundhara Volamf2858732015-03-04 00:44:33 -05003819 struct pci_dev *pdev = adapter->pdev;
3820 u16 num_vf_qs;
3821
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303822 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003823
Sathya Perla11ac75e2011-12-13 00:58:50 +00003824 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003825 be_vf_clear(adapter);
3826
Vasundhara Volambec84e62014-06-30 13:01:32 +05303827 /* Re-configure FW to distribute resources evenly across max-supported
3828 * number of VFs, only when VFs are not already enabled.
3829 */
Vasundhara Volamace40af2015-03-04 00:44:34 -05003830 if (skyhawk_chip(adapter) && be_physfn(adapter) &&
3831 !pci_vfs_assigned(pdev)) {
Vasundhara Volamf2858732015-03-04 00:44:33 -05003832 num_vf_qs = be_calculate_vf_qs(adapter,
3833 pci_sriov_get_totalvfs(pdev));
Vasundhara Volambec84e62014-06-30 13:01:32 +05303834 be_cmd_set_sriov_config(adapter, adapter->pool_res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003835 pci_sriov_get_totalvfs(pdev),
3836 num_vf_qs);
3837 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303838
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303839#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303840 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303841#endif
Kalesh APbcc84142015-08-05 03:27:48 -04003842 kfree(adapter->pmac_id);
3843 adapter->pmac_id = NULL;
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003844
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003845 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003846
Sathya Perla77071332013-08-27 16:57:34 +05303847 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003848
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003849 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303850 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003851 return 0;
3852}
3853
Sathya Perla4c876612013-02-03 20:30:11 +00003854static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003855{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303856 struct be_resources res = {0};
Kalesh APbcc84142015-08-05 03:27:48 -04003857 u32 cap_flags, en_flags, vf;
Sathya Perla4c876612013-02-03 20:30:11 +00003858 struct be_vf_cfg *vf_cfg;
Kalesh AP0700d812015-01-20 03:51:43 -05003859 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003860
Kalesh AP0700d812015-01-20 03:51:43 -05003861 /* If a FW profile exists, then cap_flags are updated */
Sathya Perla4c876612013-02-03 20:30:11 +00003862 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
Somnath Kotur0ed7d742015-05-06 05:30:34 -04003863 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003864
Sathya Perla4c876612013-02-03 20:30:11 +00003865 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303866 if (!BE3_chip(adapter)) {
3867 status = be_cmd_get_profile_config(adapter, &res,
Vasundhara Volamf2858732015-03-04 00:44:33 -05003868 RESOURCE_LIMITS,
Sathya Perla92bf14a2013-08-27 16:57:32 +05303869 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003870 if (!status) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303871 cap_flags = res.if_cap_flags;
Vasundhara Volam435452a2015-03-20 06:28:23 -04003872 /* Prevent VFs from enabling VLAN promiscuous
3873 * mode
3874 */
3875 cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
3876 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303877 }
Sathya Perla4c876612013-02-03 20:30:11 +00003878
Kalesh APbcc84142015-08-05 03:27:48 -04003879 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3880 BE_IF_FLAGS_BROADCAST |
3881 BE_IF_FLAGS_MULTICAST |
3882 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3883 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3884 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003885 if (status)
Kalesh AP0700d812015-01-20 03:51:43 -05003886 return status;
Sathya Perla4c876612013-02-03 20:30:11 +00003887 }
Kalesh AP0700d812015-01-20 03:51:43 -05003888
3889 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003890}
3891
Sathya Perla39f1d942012-05-08 19:41:24 +00003892static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003893{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003894 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003895 int vf;
3896
Sathya Perla39f1d942012-05-08 19:41:24 +00003897 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3898 GFP_KERNEL);
3899 if (!adapter->vf_cfg)
3900 return -ENOMEM;
3901
Sathya Perla11ac75e2011-12-13 00:58:50 +00003902 for_all_vfs(adapter, vf_cfg, vf) {
3903 vf_cfg->if_handle = -1;
3904 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003905 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003906 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003907}
3908
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003909static int be_vf_setup(struct be_adapter *adapter)
3910{
Sathya Perla4c876612013-02-03 20:30:11 +00003911 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303912 struct be_vf_cfg *vf_cfg;
3913 int status, old_vfs, vf;
Kalesh APe7bcbd72015-05-06 05:30:32 -04003914 bool spoofchk;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003915
Sathya Perla257a3fe2013-06-14 15:54:51 +05303916 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003917
3918 status = be_vf_setup_init(adapter);
3919 if (status)
3920 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003921
Sathya Perla4c876612013-02-03 20:30:11 +00003922 if (old_vfs) {
3923 for_all_vfs(adapter, vf_cfg, vf) {
3924 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3925 if (status)
3926 goto err;
3927 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003928
Sathya Perla4c876612013-02-03 20:30:11 +00003929 status = be_vfs_mac_query(adapter);
3930 if (status)
3931 goto err;
3932 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303933 status = be_vfs_if_create(adapter);
3934 if (status)
3935 goto err;
3936
Sathya Perla39f1d942012-05-08 19:41:24 +00003937 status = be_vf_eth_addr_config(adapter);
3938 if (status)
3939 goto err;
3940 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003941
Sathya Perla11ac75e2011-12-13 00:58:50 +00003942 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303943 /* Allow VFs to programs MAC/VLAN filters */
Vasundhara Volam435452a2015-03-20 06:28:23 -04003944 status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
3945 vf + 1);
3946 if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
Sathya Perla04a06022013-07-23 15:25:00 +05303947 status = be_cmd_set_fn_privileges(adapter,
Vasundhara Volam435452a2015-03-20 06:28:23 -04003948 vf_cfg->privileges |
Sathya Perla04a06022013-07-23 15:25:00 +05303949 BE_PRIV_FILTMGMT,
3950 vf + 1);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003951 if (!status) {
3952 vf_cfg->privileges |= BE_PRIV_FILTMGMT;
Sathya Perla04a06022013-07-23 15:25:00 +05303953 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3954 vf);
Vasundhara Volam435452a2015-03-20 06:28:23 -04003955 }
Sathya Perla04a06022013-07-23 15:25:00 +05303956 }
3957
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303958 /* Allow full available bandwidth */
3959 if (!old_vfs)
3960 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003961
Kalesh APe7bcbd72015-05-06 05:30:32 -04003962 status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
3963 vf_cfg->if_handle, NULL,
3964 &spoofchk);
3965 if (!status)
3966 vf_cfg->spoofchk = spoofchk;
3967
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303968 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303969 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303970 be_cmd_set_logical_link_config(adapter,
3971 IFLA_VF_LINK_STATE_AUTO,
3972 vf+1);
3973 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003974 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003975
3976 if (!old_vfs) {
3977 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3978 if (status) {
3979 dev_err(dev, "SRIOV enable failed\n");
3980 adapter->num_vfs = 0;
3981 goto err;
3982 }
3983 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303984
3985 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003986 return 0;
3987err:
Sathya Perla4c876612013-02-03 20:30:11 +00003988 dev_err(dev, "VF setup failed\n");
3989 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003990 return status;
3991}
3992
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303993/* Converting function_mode bits on BE3 to SH mc_type enums */
3994
3995static u8 be_convert_mc_type(u32 function_mode)
3996{
Suresh Reddy66064db2014-06-23 16:41:29 +05303997 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303998 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303999 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304000 return FLEX10;
4001 else if (function_mode & VNIC_MODE)
4002 return vNIC2;
4003 else if (function_mode & UMC_ENABLED)
4004 return UMC;
4005 else
4006 return MC_NONE;
4007}
4008
/* On BE2/BE3 FW does not suggest the supported limits, so derive them
 * here from chip type, multi-channel mode and function capabilities.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	/* mc_type must be set before the be_is_mc()/be_is_qnq_mode()
	 * checks below, which read it
	 */
	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 *    *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    be_virtfn(adapter) ||
	    (be_is_mc(adapter) &&
	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res,
					  RESOURCE_LIMITS, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only on an RSS-capable, non-SRIOV physical function;
	 * otherwise max_rss_qs stays at its caller-initialized value
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				  BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				  BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	/* BEx FW has no default-RSS-queue support */
	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
4079
Sathya Perla30128032011-11-10 19:17:57 +00004080static void be_setup_init(struct be_adapter *adapter)
4081{
4082 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004083 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00004084 adapter->if_handle = -1;
4085 adapter->be3_native = false;
Sathya Perlaf66b7cf2015-02-06 08:18:41 -05004086 adapter->if_flags = 0;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00004087 if (be_physfn(adapter))
4088 adapter->cmd_privileges = MAX_PRIVILEGES;
4089 else
4090 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00004091}
4092
Vasundhara Volambec84e62014-06-30 13:01:32 +05304093static int be_get_sriov_config(struct be_adapter *adapter)
4094{
Vasundhara Volambec84e62014-06-30 13:01:32 +05304095 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05304096 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304097
Vasundhara Volamf2858732015-03-04 00:44:33 -05004098 be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
Sathya Perlad3d18312014-08-01 17:47:30 +05304099
Vasundhara Volamace40af2015-03-04 00:44:34 -05004100 /* Some old versions of BE3 FW don't report max_vfs value */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304101 if (BE3_chip(adapter) && !res.max_vfs) {
4102 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4103 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4104 }
4105
Sathya Perlad3d18312014-08-01 17:47:30 +05304106 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304107
Vasundhara Volamace40af2015-03-04 00:44:34 -05004108 /* If during previous unload of the driver, the VFs were not disabled,
4109 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4110 * Instead use the TotalVFs value stored in the pci-dev struct.
4111 */
Vasundhara Volambec84e62014-06-30 13:01:32 +05304112 old_vfs = pci_num_vf(adapter->pdev);
4113 if (old_vfs) {
Vasundhara Volamace40af2015-03-04 00:44:34 -05004114 dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4115 old_vfs);
4116
4117 adapter->pool_res.max_vfs =
4118 pci_sriov_get_totalvfs(adapter->pdev);
Vasundhara Volambec84e62014-06-30 13:01:32 +05304119 adapter->num_vfs = old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05304120 }
4121
4122 return 0;
4123}
4124
Vasundhara Volamace40af2015-03-04 00:44:34 -05004125static void be_alloc_sriov_res(struct be_adapter *adapter)
4126{
4127 int old_vfs = pci_num_vf(adapter->pdev);
4128 u16 num_vf_qs;
4129 int status;
4130
4131 be_get_sriov_config(adapter);
4132
4133 if (!old_vfs)
4134 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4135
4136 /* When the HW is in SRIOV capable configuration, the PF-pool
4137 * resources are given to PF during driver load, if there are no
4138 * old VFs. This facility is not available in BE3 FW.
4139 * Also, this is done by FW in Lancer chip.
4140 */
4141 if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4142 num_vf_qs = be_calculate_vf_qs(adapter, 0);
4143 status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4144 num_vf_qs);
4145 if (status)
4146 dev_err(&adapter->pdev->dev,
4147 "Failed to optimize SRIOV resources\n");
4148 }
4149}
4150
/* Populate adapter->res with the queue/MAC/VLAN limits for this
 * function: derived locally on BE2/BE3, queried from FW otherwise.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If a default RXQ must be created, we'll use up one RSSQ */
		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
			res.max_rss_qs -= 1;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	/* If FW supports RSS default queue, then skip creating non-RSS
	 * queue for non-IP traffic.
	 */
	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
				    be_max_qs(adapter));
	return 0;
}
4201
Sathya Perla39f1d942012-05-08 19:41:24 +00004202static int be_get_config(struct be_adapter *adapter)
4203{
Sathya Perla6b085ba2015-02-23 04:20:09 -05004204 int status, level;
Vasundhara Volam542963b2014-01-15 13:23:33 +05304205 u16 profile_id;
Sathya Perla6b085ba2015-02-23 04:20:09 -05004206
4207 status = be_cmd_get_cntl_attributes(adapter);
4208 if (status)
4209 return status;
Sathya Perla39f1d942012-05-08 19:41:24 +00004210
Kalesh APe97e3cd2014-07-17 16:20:26 +05304211 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004212 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05304213 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004214
Sathya Perla6b085ba2015-02-23 04:20:09 -05004215 if (BEx_chip(adapter)) {
4216 level = be_cmd_get_fw_log_level(adapter);
4217 adapter->msg_enable =
4218 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4219 }
4220
4221 be_cmd_get_acpi_wol_cap(adapter);
4222
Vasundhara Volam21252372015-02-06 08:18:42 -05004223 be_cmd_query_port_name(adapter);
4224
4225 if (be_physfn(adapter)) {
Vasundhara Volam542963b2014-01-15 13:23:33 +05304226 status = be_cmd_get_active_profile(adapter, &profile_id);
4227 if (!status)
4228 dev_info(&adapter->pdev->dev,
4229 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05304230 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05304231
Sathya Perla92bf14a2013-08-27 16:57:32 +05304232 status = be_get_resources(adapter);
4233 if (status)
4234 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004235
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05304236 adapter->pmac_id = kcalloc(be_max_uc(adapter),
4237 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05304238 if (!adapter->pmac_id)
4239 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00004240
Sathya Perla92bf14a2013-08-27 16:57:32 +05304241 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00004242}
4243
Sathya Perla95046b92013-07-23 15:25:02 +05304244static int be_mac_setup(struct be_adapter *adapter)
4245{
4246 u8 mac[ETH_ALEN];
4247 int status;
4248
4249 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4250 status = be_cmd_get_perm_mac(adapter, mac);
4251 if (status)
4252 return status;
4253
4254 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4255 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
Sathya Perla95046b92013-07-23 15:25:02 +05304256 }
4257
Sathya Perla95046b92013-07-23 15:25:02 +05304258 return 0;
4259}
4260
/* Schedule the periodic (1s) worker and record that it is scheduled so
 * it can later be cancelled symmetrically (see be_cancel_worker usage).
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
4266
/* Schedule the error-detection work to run in 1s and flag it as
 * scheduled so it can be cancelled symmetrically on teardown.
 */
static void be_schedule_err_detection(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->be_err_detection_work,
			      msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
}
4273
Sathya Perla77071332013-08-27 16:57:34 +05304274static int be_setup_queues(struct be_adapter *adapter)
4275{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304276 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05304277 int status;
4278
4279 status = be_evt_queues_create(adapter);
4280 if (status)
4281 goto err;
4282
4283 status = be_tx_qs_create(adapter);
4284 if (status)
4285 goto err;
4286
4287 status = be_rx_cqs_create(adapter);
4288 if (status)
4289 goto err;
4290
4291 status = be_mcc_queues_create(adapter);
4292 if (status)
4293 goto err;
4294
Sathya Perla68d7bdc2013-08-27 16:57:35 +05304295 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4296 if (status)
4297 goto err;
4298
4299 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4300 if (status)
4301 goto err;
4302
Sathya Perla77071332013-08-27 16:57:34 +05304303 return 0;
4304err:
4305 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4306 return status;
4307}
4308
/* Tear down and re-create all queues (e.g. after a channel-count
 * change): close the netdev, stop the worker, re-program MSI-X where
 * possible, rebuild the queues and bring everything back up.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-X only if it was disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	/* Re-open only if the interface was up when we started */
	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
4344
/* Parse the leading major number out of a "major.minor..." FW version
 * string. Returns 0 when no leading number can be parsed.
 */
static inline int fw_major_num(const char *fw_ver)
{
	int major = 0;

	if (sscanf(fw_ver, "%d.", &major) != 1)
		return 0;

	return major;
}
4355
Sathya Perlaf962f842015-02-23 04:20:16 -05004356/* If any VFs are already enabled don't FLR the PF */
4357static bool be_reset_required(struct be_adapter *adapter)
4358{
4359 return pci_num_vf(adapter->pdev) ? false : true;
4360}
4361
/* Wait for the FW to be ready and perform the required initialization:
 * optional function-level reset (FLR), FW init handshake and interrupt
 * enable. Returns 0 on success or the first failing command's status.
 */
static int be_func_init(struct be_adapter *adapter)
{
	int status;

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	/* FLR is skipped when VFs are still enabled (be_reset_required) */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			return status;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);

		/* We can clear all errors when function reset succeeds */
		be_clear_error(adapter, BE_CLEAR_ALL);
	}

	/* Tell FW we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	return 0;
}
4393
/* Full function bring-up: FW init, config/resource discovery, MSI-x,
 * interface and queue creation, MAC programming, flow control and
 * (optionally) VF setup. On any fatal error everything created so far
 * is torn down via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 en_flags;
	int status;

	status = be_func_init(adapter);
	if (status)
		return status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	/* SRIOV pool sizing applies only to BE3+/Skyhawk PFs */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_alloc_sriov_res(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* will enable all the needed filter flags in be_open() */
	en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn (but continue) on known-problematic old BE2 firmware */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* If FW rejects the requested flow-control settings, fall back to
	 * reading the current values so our cached state stays accurate
	 */
	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
					 adapter->rx_fc);
	if (status)
		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
					&adapter->rx_fc);

	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
		 adapter->tx_fc, adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
4477
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled "interrupt" path: notify each EQ and kick its NAPI context */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int idx;

	for_all_evt_queues(adapter, eqo, idx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
4491
/* "*** SE FLASH DIRECTORY ***" marker, split into two 16-byte halves;
 * compared as one 32-byte blob (sizeof(flash_cookie)) against the
 * cookie field at the start of a UFI flash section directory.
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08004493
Sathya Perla306f1342011-08-02 19:57:45 +00004494static bool phy_flashing_required(struct be_adapter *adapter)
4495{
Vasundhara Volame02cfd92015-01-20 03:51:48 -05004496 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00004497 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00004498}
4499
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004500static bool is_comp_in_ufi(struct be_adapter *adapter,
4501 struct flash_section_info *fsec, int type)
4502{
4503 int i = 0, img_type = 0;
4504 struct flash_section_info_g2 *fsec_g2 = NULL;
4505
Sathya Perlaca34fe32012-11-06 17:48:56 +00004506 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004507 fsec_g2 = (struct flash_section_info_g2 *)fsec;
4508
4509 for (i = 0; i < MAX_FLASH_COMP; i++) {
4510 if (fsec_g2)
4511 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
4512 else
4513 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4514
4515 if (img_type == type)
4516 return true;
4517 }
4518 return false;
4519
4520}
4521
Jingoo Han4188e7d2013-08-05 18:02:02 +09004522static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304523 int header_size,
4524 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00004525{
4526 struct flash_section_info *fsec = NULL;
4527 const u8 *p = fw->data;
4528
4529 p += header_size;
4530 while (p < (fw->data + fw->size)) {
4531 fsec = (struct flash_section_info *)p;
4532 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
4533 return fsec;
4534 p += 32;
4535 }
4536 return NULL;
4537}
4538
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304539static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
4540 u32 img_offset, u32 img_size, int hdr_size,
4541 u16 img_optype, bool *crc_match)
4542{
4543 u32 crc_offset;
4544 int status;
4545 u8 crc[4];
4546
Vasundhara Volam70a7b522015-02-06 08:18:39 -05004547 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
4548 img_size - 4);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304549 if (status)
4550 return status;
4551
4552 crc_offset = hdr_size + img_offset + img_size - 4;
4553
4554 /* Skip flashing, if crc of flashed region matches */
4555 if (!memcmp(crc, p + crc_offset, 4))
4556 *crc_match = true;
4557 else
4558 *crc_match = false;
4559
4560 return status;
4561}
4562
/* Write one firmware image to flash in 32KB chunks via the FW mailbox.
 * All chunks but the last use a SAVE opcode; the final chunk uses a
 * FLASH opcode to commit the image (PHY FW has its own opcode pair).
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size,
		    u32 img_offset)
{
	u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	int status;

	while (total_bytes) {
		/* Chunk size: at most 32KB per flashrom command */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks stage (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_PHY_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, img_offset +
					       bytes_sent, num_bytes);
		/* FW rejecting PHY flashing is non-fatal: stop quietly */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;

		bytes_sent += num_bytes;
	}
	return 0;
}
4603
/* For BE2, BE3 and BE3-R: flash each component found in the UFI image,
 * using fixed per-generation component tables (offset/optype/max-size).
 * Components are skipped when absent from the image, gated by FW
 * version (NCSI), not applicable (PHY), or already up to date (redboot
 * CRC match).
 */
static int be_flash_BEx(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	int status, i, filehdr_size, num_comp;
	const struct flash_comp *pflashcomp;
	bool crc_match;
	const u8 *p;

	/* Component layout for BE3/BE3-R (gen3) images */
	struct flash_comp gen3_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
		{ FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
		{ FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
	};

	/* Component layout for BE2 (gen2) images */
	struct flash_comp gen2_flash_types[] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
		{ FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
		{ FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
		{ FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
		{ FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
	};

	if (BE3_chip(adapter)) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
		/* gen2 images carry no per-image headers */
		img_hdrs_size = 0;
	}

	/* Get flash section info */
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -1;
	}
	for (i = 0; i < num_comp; i++) {
		if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
			continue;

		/* NCSI FW needs base FW >= 3.102.148.0 */
		if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
		    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;

		if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
		    !phy_flashing_required(adapter))
			continue;

		/* Redboot is only re-flashed when its CRC differs */
		if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
			status = be_check_flash_crc(adapter, fw->data,
						    pflashcomp[i].offset,
						    pflashcomp[i].size,
						    filehdr_size +
						    img_hdrs_size,
						    OPTYPE_REDBOOT, &crc_match);
			if (status) {
				dev_err(dev,
					"Could not get CRC for 0x%x region\n",
					pflashcomp[i].optype);
				continue;
			}

			if (crc_match)
				continue;
		}

		p = fw->data + filehdr_size + pflashcomp[i].offset +
			img_hdrs_size;
		/* Bounds-check the component against the image size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
				  pflashcomp[i].size, 0);
		if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				pflashcomp[i].img_type);
			return status;
		}
	}
	return 0;
}
4721
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304722static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
4723{
4724 u32 img_type = le32_to_cpu(fsec_entry.type);
4725 u16 img_optype = le16_to_cpu(fsec_entry.optype);
4726
4727 if (img_optype != 0xFFFF)
4728 return img_optype;
4729
4730 switch (img_type) {
4731 case IMAGE_FIRMWARE_iSCSI:
4732 img_optype = OPTYPE_ISCSI_ACTIVE;
4733 break;
4734 case IMAGE_BOOT_CODE:
4735 img_optype = OPTYPE_REDBOOT;
4736 break;
4737 case IMAGE_OPTION_ROM_ISCSI:
4738 img_optype = OPTYPE_BIOS;
4739 break;
4740 case IMAGE_OPTION_ROM_PXE:
4741 img_optype = OPTYPE_PXE_BIOS;
4742 break;
4743 case IMAGE_OPTION_ROM_FCoE:
4744 img_optype = OPTYPE_FCOE_BIOS;
4745 break;
4746 case IMAGE_FIRMWARE_BACKUP_iSCSI:
4747 img_optype = OPTYPE_ISCSI_BACKUP;
4748 break;
4749 case IMAGE_NCSI:
4750 img_optype = OPTYPE_NCSI_FW;
4751 break;
4752 case IMAGE_FLASHISM_JUMPVECTOR:
4753 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
4754 break;
4755 case IMAGE_FIRMWARE_PHY:
4756 img_optype = OPTYPE_SH_PHY_FW;
4757 break;
4758 case IMAGE_REDBOOT_DIR:
4759 img_optype = OPTYPE_REDBOOT_DIR;
4760 break;
4761 case IMAGE_REDBOOT_CONFIG:
4762 img_optype = OPTYPE_REDBOOT_CONFIG;
4763 break;
4764 case IMAGE_UFI_DIR:
4765 img_optype = OPTYPE_UFI_DIR;
4766 break;
4767 default:
4768 break;
4769 }
4770
4771 return img_optype;
4772}
4773
/* Flash all sections of a UFI image onto a Skyhawk adapter.
 *
 * Iterates the flash-section entries in the FW image. Flashing is first
 * attempted with the newer OFFSET-based mechanism; if the FW on the card
 * rejects it (ILLEGAL_REQUEST/ILLEGAL_FIELD), the whole loop is retried
 * with the older OPTYPE-based mechanism (see retry_flash label).
 *
 * Returns 0 on success, -EINVAL for a corrupted image, -EAGAIN when a
 * reboot is needed to finish flashing, -EFAULT on command failures.
 */
static int be_flash_skyhawk(struct be_adapter *adapter,
			    const struct firmware *fw,
			    struct be_dma_mem *flash_cmd, int num_of_images)
{
	int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
	bool crc_match, old_fw_img, flash_offset_support = true;
	struct device *dev = &adapter->pdev->dev;
	struct flash_section_info *fsec = NULL;
	u32 img_offset, img_size, img_type;
	u16 img_optype, flash_optype;
	int status, i, filehdr_size;
	const u8 *p;

	filehdr_size = sizeof(struct flash_file_hdr_g3);
	fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
	if (!fsec) {
		dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
		return -EINVAL;
	}

retry_flash:
	for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
		img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
		img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
		img_type = le32_to_cpu(fsec->fsec_entry[i].type);
		img_optype = be_get_img_optype(fsec->fsec_entry[i]);
		/* optype == 0xFFFF in the entry marks a legacy (old) image */
		old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;

		/* Unrecognized section: skip it */
		if (img_optype == 0xFFFF)
			continue;

		if (flash_offset_support)
			flash_optype = OPTYPE_OFFSET_SPECIFIED;
		else
			flash_optype = img_optype;

		/* Don't bother verifying CRC if an old FW image is being
		 * flashed
		 */
		if (old_fw_img)
			goto flash;

		/* Skip re-flashing a section whose CRC already matches */
		status = be_check_flash_crc(adapter, fw->data, img_offset,
					    img_size, filehdr_size +
					    img_hdrs_size, flash_optype,
					    &crc_match);
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
		    base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
			/* The current FW image on the card does not support
			 * OFFSET based flashing. Retry using older mechanism
			 * of OPTYPE based flashing
			 */
			if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
				flash_offset_support = false;
				goto retry_flash;
			}

			/* The current FW image on the card does not recognize
			 * the new FLASH op_type. The FW download is partially
			 * complete. Reboot the server now to enable FW image
			 * to recognize the new FLASH op_type. To complete the
			 * remaining process, download the same FW again after
			 * the reboot.
			 */
			dev_err(dev, "Flash incomplete. Reset the server\n");
			dev_err(dev, "Download FW image again after reset\n");
			return -EAGAIN;
		} else if (status) {
			dev_err(dev, "Could not get CRC for 0x%x region\n",
				img_optype);
			return -EFAULT;
		}

		if (crc_match)
			continue;

flash:
		p = fw->data + filehdr_size + img_offset + img_hdrs_size;
		/* Guard against a section that runs past the image buffer */
		if (p + img_size > fw->data + fw->size)
			return -1;

		status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
				  img_offset);

		/* The current FW image on the card does not support OFFSET
		 * based flashing. Retry using older mechanism of OPTYPE based
		 * flashing
		 */
		if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
		    flash_optype == OPTYPE_OFFSET_SPECIFIED) {
			flash_offset_support = false;
			goto retry_flash;
		}

		/* For old FW images ignore ILLEGAL_FIELD error or errors on
		 * UFI_DIR region
		 */
		if (old_fw_img &&
		    (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
		     (img_optype == OPTYPE_UFI_DIR &&
		      base_status(status) == MCC_STATUS_FAILED))) {
			continue;
		} else if (status) {
			dev_err(dev, "Flashing section type 0x%x failed\n",
				img_type);
			return -EFAULT;
		}
	}
	return 0;
}
4884
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed in 32KB chunks via WRITE_OBJECT commands to the
 * "/prg" flash location, then committed with a zero-length write. Depending
 * on change_status, the adapter is either reset in-place to activate the
 * new FW or the user is told to reboot.
 *
 * Returns 0 on success or a negative errno.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* HW requires the image length to be a multiple of 4 bytes */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
					   &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image chunk by chunk; FW reports how much it consumed */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* FW can be activated by resetting the physdev in-place */
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4969
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004970/* Check if the flash image file is compatible with the adapter that
4971 * is being flashed.
4972 */
4973static bool be_check_ufi_compatibility(struct be_adapter *adapter,
4974 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004975{
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004976 if (!fhdr) {
4977 dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
4978 return -1;
4979 }
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004980
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004981 /* First letter of the build version is used to identify
4982 * which chip this image file is meant for.
4983 */
4984 switch (fhdr->build[0]) {
4985 case BLD_STR_UFI_TYPE_SH:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004986 if (!skyhawk_chip(adapter))
4987 return false;
4988 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004989 case BLD_STR_UFI_TYPE_BE3:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004990 if (!BE3_chip(adapter))
4991 return false;
4992 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004993 case BLD_STR_UFI_TYPE_BE2:
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04004994 if (!BE2_chip(adapter))
4995 return false;
4996 break;
Vasundhara Volam5d3acd02015-02-06 08:18:37 -05004997 default:
4998 return false;
4999 }
Vasundhara Volama6e6ff62015-05-06 05:30:37 -04005000
5001 return (fhdr->asic_type_rev >= adapter->asic_rev);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00005002}
5003
/* Download a firmware image to a BE2/BE3/Skyhawk adapter.
 *
 * Validates image/adapter compatibility, then walks the per-image headers
 * and flashes via the chip-appropriate routine (be_flash_skyhawk or
 * be_flash_BEx). Returns 0 on success or a negative errno.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct device *dev = &adapter->pdev->dev;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr;
	int status = 0, i, num_imgs;
	struct be_dma_mem flash_cmd;

	fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
	if (!be_check_ufi_compatibility(adapter, fhdr3)) {
		dev_err(dev, "Flash image is not compatible with adapter\n");
		return -EINVAL;
	}

	/* DMA buffer reused for every flashrom write command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
					   GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* On BE3/Skyhawk only image id 1 is flashed */
		if (!BE2_chip(adapter) &&
		    le32_to_cpu(img_hdr_ptr->imageid) != 1)
			continue;

		if (skyhawk_chip(adapter))
			status = be_flash_skyhawk(adapter, fw, &flash_cmd,
						  num_imgs);
		else
			status = be_flash_BEx(adapter, fw, &flash_cmd,
					      num_imgs);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (!status)
		dev_info(dev, "Firmware flashed successfully\n");

	return status;
}
5047
5048int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
5049{
5050 const struct firmware *fw;
5051 int status;
5052
5053 if (!netif_running(adapter->netdev)) {
5054 dev_err(&adapter->pdev->dev,
5055 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05305056 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00005057 }
5058
5059 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
5060 if (status)
5061 goto fw_exit;
5062
5063 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
5064
5065 if (lancer_chip(adapter))
5066 status = lancer_fw_download(adapter, fw);
5067 else
5068 status = be_fw_download(adapter, fw);
5069
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005070 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05305071 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00005072
Ajit Khaparde84517482009-09-04 03:12:16 +00005073fw_exit:
5074 release_firmware(fw);
5075 return status;
5076}
5077
/* ndo_bridge_setlink handler: program the HW switch forwarding mode
 * (VEB or VEPA) from an IFLA_BRIDGE_MODE netlink attribute.
 *
 * Only supported when SR-IOV is enabled. Returns 0 on success or a
 * negative errno.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
				 u16 flags)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB, 0);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
err:
	/* NOTE(review): also reached when no IFLA_BRIDGE_MODE attribute was
	 * present; in that case the error is logged but status is still 0.
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
5124
5125static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Nicolas Dichtel46c264d2015-04-28 18:33:49 +02005126 struct net_device *dev, u32 filter_mask,
5127 int nlflags)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005128{
5129 struct be_adapter *adapter = netdev_priv(dev);
5130 int status = 0;
5131 u8 hsw_mode;
5132
5133 if (!sriov_enabled(adapter))
5134 return 0;
5135
5136 /* BE and Lancer chips support VEB mode only */
5137 if (BEx_chip(adapter) || lancer_chip(adapter)) {
5138 hsw_mode = PORT_FWD_TYPE_VEB;
5139 } else {
5140 status = be_cmd_get_hsw_config(adapter, NULL, 0,
Kalesh APe7bcbd72015-05-06 05:30:32 -04005141 adapter->if_handle, &hsw_mode,
5142 NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005143 if (status)
5144 return 0;
5145 }
5146
5147 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5148 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01005149 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
Scott Feldman7d4f8d82015-06-22 00:27:17 -07005150 0, 0, nlflags, filter_mask, NULL);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05005151}
5152
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305153#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005154/* VxLAN offload Notes:
5155 *
5156 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
5157 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
5158 * is expected to work across all types of IP tunnels once exported. Skyhawk
5159 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305160 * offloads in hw_enc_features only when a VxLAN port is added. If other (non
5161 * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
5162 * those other tunnels are unexported on the fly through ndo_features_check().
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005163 *
5164 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
5165 * adds more than one port, disable offloads and don't re-enable them again
5166 * until after all the tunnels are removed.
5167 */
/* Stack callback: a VxLAN UDP port was added.
 *
 * Skyhawk supports offloads for a single VxLAN dport only (see the VxLAN
 * offload notes above this function). The first port enables HW tunnel
 * offloads; any additional port disables them until all ports are removed.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* VxLAN offloads are supported on Skyhawk only */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		/* A second port while offloads are on: turn offloads off and
		 * keep counting ports so they can be re-enabled only after
		 * all tunnels are deleted.
		 */
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		dev_info(dev, "Disabling VxLAN offloads\n");
		adapter->vxlan_port_count++;
		goto err;
	}

	/* Offloads already disabled due to multiple ports; just count */
	if (adapter->vxlan_port_count++ >= 1)
		return;

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	/* Export tunnel offload features now that a VxLAN port exists */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				   NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_GSO_UDP_TUNNEL;
	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	be_disable_vxlan_offloads(adapter);
}
5216
5217static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
5218 __be16 port)
5219{
5220 struct be_adapter *adapter = netdev_priv(netdev);
5221
5222 if (lancer_chip(adapter) || BEx_chip(adapter))
5223 return;
5224
5225 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005226 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05305227
5228 be_disable_vxlan_offloads(adapter);
5229
5230 dev_info(&adapter->pdev->dev,
5231 "Disabled VxLAN offloads for UDP port %d\n",
5232 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05005233done:
5234 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05305235}
Joe Stringer725d5482014-11-13 16:38:13 -08005236
Jesse Gross5f352272014-12-23 22:37:26 -08005237static netdev_features_t be_features_check(struct sk_buff *skb,
5238 struct net_device *dev,
5239 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08005240{
Sriharsha Basavapatna16dde0d2015-01-15 16:08:43 +05305241 struct be_adapter *adapter = netdev_priv(dev);
5242 u8 l4_hdr = 0;
5243
5244 /* The code below restricts offload features for some tunneled packets.
5245 * Offload features for normal (non tunnel) packets are unchanged.
5246 */
5247 if (!skb->encapsulation ||
5248 !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5249 return features;
5250
5251 /* It's an encapsulated packet and VxLAN offloads are enabled. We
5252 * should disable tunnel offload features if it's not a VxLAN packet,
5253 * as tunnel offloads have been enabled only for VxLAN. This is done to
5254 * allow other tunneled traffic like GRE work fine while VxLAN
5255 * offloads are configured in Skyhawk-R.
5256 */
5257 switch (vlan_get_protocol(skb)) {
5258 case htons(ETH_P_IP):
5259 l4_hdr = ip_hdr(skb)->protocol;
5260 break;
5261 case htons(ETH_P_IPV6):
5262 l4_hdr = ipv6_hdr(skb)->nexthdr;
5263 break;
5264 default:
5265 return features;
5266 }
5267
5268 if (l4_hdr != IPPROTO_UDP ||
5269 skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5270 skb->inner_protocol != htons(ETH_P_TEB) ||
5271 skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5272 sizeof(struct udphdr) + sizeof(struct vxlanhdr))
5273 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
5274
5275 return features;
Joe Stringer725d5482014-11-13 16:38:13 -08005276}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05305277#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05305278
/* Net-device operations table registered in be_netdev_init(). */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	/* SR-IOV VF management callbacks */
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
	.ndo_set_vf_spoofchk = be_set_vf_spoofchk,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
	.ndo_features_check = be_features_check,
#endif
};
5310
/* One-time net_device initialization: advertise offload features,
 * set device flags and hook up the netdev/ethtool ops tables.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* User-toggleable offloads (ethtool -K) */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Currently-enabled features: all togglable ones plus VLAN RX
	 * offloads, which cannot be turned off.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO size so frame + Ethernet header fits in 64K */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
5337
/* Quiesce the adapter for error recovery / suspend: detach the netdev
 * from the stack (under RTNL), close it if it was running, then tear
 * down adapter resources with be_clear().
 */
static void be_cleanup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	rtnl_lock();
	netif_device_detach(netdev);
	if (netif_running(netdev))
		be_close(netdev);
	rtnl_unlock();

	be_clear(adapter);
}
5350
/* Counterpart of be_cleanup(): re-initialize the adapter, re-open the
 * interface if it was up, and re-attach the netdev to the stack.
 * Returns 0 on success or a negative errno.
 */
static int be_resume(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	status = be_setup(adapter);
	if (status)
		return status;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			return status;
	}

	netif_device_attach(netdev);

	return 0;
}
5370
5371static int be_err_recover(struct be_adapter *adapter)
5372{
5373 struct device *dev = &adapter->pdev->dev;
5374 int status;
5375
5376 status = be_resume(adapter);
5377 if (status)
5378 goto err;
5379
Sathya Perla9fa465c2015-02-23 04:20:13 -05005380 dev_info(dev, "Adapter recovery successful\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005381 return 0;
Padmanabh Ratnakard8110f62011-11-25 05:48:23 +00005382err:
Sathya Perla9fa465c2015-02-23 04:20:13 -05005383 if (be_physfn(adapter))
Somnath Kotur4bebb562013-12-05 12:07:55 +05305384 dev_err(dev, "Adapter recovery failed\n");
Sathya Perla9fa465c2015-02-23 04:20:13 -05005385 else
5386 dev_err(dev, "Re-trying adapter recovery\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005387
5388 return status;
5389}
5390
/* Delayed-work handler that polls for HW errors and, on Lancer, attempts
 * in-place recovery. Reschedules itself unless a PF recovery attempt
 * failed.
 */
static void be_err_detection_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter,
			     be_err_detection_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (be_check_error(adapter, BE_ERROR_HW)) {
		be_cleanup(adapter);

		/* As of now error recovery support is in Lancer only */
		if (lancer_chip(adapter))
			status = be_err_recover(adapter);
	}

	/* Always attempt recovery on VFs */
	if (!status || be_virtfn(adapter))
		be_schedule_err_detection(adapter);
}
5412
Vasundhara Volam21252372015-02-06 08:18:42 -05005413static void be_log_sfp_info(struct be_adapter *adapter)
5414{
5415 int status;
5416
5417 status = be_cmd_query_sfp_info(adapter);
5418 if (!status) {
5419 dev_err(&adapter->pdev->dev,
5420 "Unqualified SFP+ detected on %c from %s part no: %s",
5421 adapter->port_name, adapter->phy.vendor_name,
5422 adapter->phy.vendor_pn);
5423 }
5424 adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP;
5425}
5426
/* Periodic (1s) housekeeping work: reaps MCC completions, refreshes
 * stats and die temperature, replenishes starved RX queues, updates EQ
 * delay and logs pending SFP events. Reschedules itself unconditionally.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Refresh HW stats, but only if the previous request completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die temperature is polled on the PF at a reduced frequency */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	/* EQ-delay update for Skyhawk is done while notifying EQ */
	if (!skyhawk_chip(adapter))
		be_eqd_update(adapter, false);

	if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP)
		be_log_sfp_info(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
5475
Sathya Perla78fad34e2015-02-23 04:20:08 -05005476static void be_unmap_pci_bars(struct be_adapter *adapter)
5477{
5478 if (adapter->csr)
5479 pci_iounmap(adapter->pdev, adapter->csr);
5480 if (adapter->db)
5481 pci_iounmap(adapter->pdev, adapter->db);
5482}
5483
/* Doorbell BAR index: BAR 0 on Lancer chips and on virtual functions,
 * BAR 4 on everything else.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || be_virtfn(adapter)) ? 0 : 4;
}
5491
5492static int be_roce_map_pci_bars(struct be_adapter *adapter)
5493{
5494 if (skyhawk_chip(adapter)) {
5495 adapter->roce_db.size = 4096;
5496 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5497 db_bar(adapter));
5498 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5499 db_bar(adapter));
5500 }
5501 return 0;
5502}
5503
/* Map the PCI BARs (CSR, doorbell, PCICFG) used by the driver.
 * Which BARs exist and at what index depends on the chip family
 * (BE2/BE3 vs Lancer vs Skyhawk) and on PF vs VF.
 * Returns 0 on success or -ENOMEM if any required mapping fails.
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 __iomem *addr;
	u32 sli_intf;

	/* The SLI_INTF config register reports the SLI family and whether
	 * this PCI function is a virtual function (FT bit).
	 */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	/* CSR BAR (BAR 2) is mapped only on BEx physical functions */
	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	/* Doorbell BAR index comes from db_bar(): 0 or 4 per chip/function */
	addr = pci_iomap(pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
		if (be_physfn(adapter)) {
			/* PCICFG is the 2nd BAR in BE2 */
			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
			if (!addr)
				goto pci_map_err;
			adapter->pcicfg = addr;
		} else {
			/* VFs reach PCICFG at a fixed offset inside the
			 * already-mapped doorbell BAR.
			 */
			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
		}
	}

	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);	/* unwinds any partial mappings */
	return -ENOMEM;
}
5546
5547static void be_drv_cleanup(struct be_adapter *adapter)
5548{
5549 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5550 struct device *dev = &adapter->pdev->dev;
5551
5552 if (mem->va)
5553 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5554
5555 mem = &adapter->rx_filter;
5556 if (mem->va)
5557 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5558
5559 mem = &adapter->stats_cmd;
5560 if (mem->va)
5561 dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5562}
5563
/* Allocate and initialize various fields in be_adapter struct.
 * Sets up the mailbox, rx-filter and stats DMA buffers, the command
 * locks, and the deferred-work items. Returns 0 or -ENOMEM; on failure
 * any buffers allocated so far are freed via the goto-cleanup chain.
 */
static int be_drv_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
	struct device *dev = &adapter->pdev->dev;
	int status = 0;

	/* Over-allocate by 16 bytes so the mailbox itself can be placed at
	 * a 16-byte-aligned address within the allocation (see PTR_ALIGN
	 * below); the unaligned original is kept for freeing.
	 */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
						 &mbox_mem_alloc->dma,
						 GFP_KERNEL);
	if (!mbox_mem_alloc->va)
		return -ENOMEM;

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
					    &rx_filter->dma, GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	/* The stats request layout (and hence size) differs per chip
	 * generation: Lancer pport stats vs v0/v1/v2 GET_STATS.
	 */
	if (lancer_chip(adapter))
		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
	stats_cmd->va = dma_zalloc_coherent(dev, stats_cmd->size,
					    &stats_cmd->dma, GFP_KERNEL);
	if (!stats_cmd->va) {
		status = -ENOMEM;
		goto free_rx_filter;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);
	init_completion(&adapter->et_cmd_compl);

	pci_save_state(adapter->pdev);

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->be_err_detection_work,
			  be_err_detection_task);

	adapter->rx_fc = true;
	adapter->tx_fc = true;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	return 0;

free_rx_filter:
	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
free_mbox:
	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
			  mbox_mem_alloc->dma);
	return status;
}
5634
/* PCI remove callback: tear down the adapter in (roughly) the reverse
 * order of be_probe(). The ordering matters: RoCE and interrupts first,
 * then the err-detection worker, then netdev unregistration, and the
 * firmware clean command before the BARs it uses are unmapped.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	be_cancel_err_detection(adapter);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_unmap_pci_bars(adapter);
	be_drv_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* last: adapter lives inside the netdev's private area */
	free_netdev(adapter->netdev);
}
5664
Arnd Bergmann9a032592015-05-18 23:06:45 +02005665static ssize_t be_hwmon_show_temp(struct device *dev,
5666 struct device_attribute *dev_attr,
5667 char *buf)
Venkata Duvvuru29e91222015-05-13 13:00:12 +05305668{
5669 struct be_adapter *adapter = dev_get_drvdata(dev);
5670
5671 /* Unit: millidegree Celsius */
5672 if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5673 return -EIO;
5674 else
5675 return sprintf(buf, "%u\n",
5676 adapter->hwmon_info.be_on_die_temp * 1000);
5677}
5678
/* hwmon plumbing: expose the on-die temperature as the read-only
 * sysfs attribute "temp1_input", registered via be_hwmon_groups in
 * be_probe().
 */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
			  be_hwmon_show_temp, NULL, 1);

static struct attribute *be_hwmon_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};

ATTRIBUTE_GROUPS(be_hwmon);
5688
Sathya Perlad3791422012-09-28 04:39:44 +00005689static char *mc_name(struct be_adapter *adapter)
5690{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05305691 char *str = ""; /* default */
5692
5693 switch (adapter->mc_type) {
5694 case UMC:
5695 str = "UMC";
5696 break;
5697 case FLEX10:
5698 str = "FLEX10";
5699 break;
5700 case vNIC1:
5701 str = "vNIC-1";
5702 break;
5703 case nPAR:
5704 str = "nPAR";
5705 break;
5706 case UFP:
5707 str = "UFP";
5708 break;
5709 case vNIC2:
5710 str = "vNIC-2";
5711 break;
5712 default:
5713 str = "";
5714 }
5715
5716 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00005717}
5718
/* "PF" or "VF" depending on this PCI function's role */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";
	return "VF";
}
5723
Sathya Perlaf7062ee2015-02-06 08:18:35 -05005724static inline char *nic_name(struct pci_dev *pdev)
5725{
5726 switch (pdev->device) {
5727 case OC_DEVICE_ID1:
5728 return OC_NAME;
5729 case OC_DEVICE_ID2:
5730 return OC_NAME_BE;
5731 case OC_DEVICE_ID3:
5732 case OC_DEVICE_ID4:
5733 return OC_NAME_LANCER;
5734 case BE_DEVICE_ID2:
5735 return BE3_NAME;
5736 case OC_DEVICE_ID5:
5737 case OC_DEVICE_ID6:
5738 return OC_NAME_SH;
5739 default:
5740 return BE_NAME;
5741 }
5742}
5743
/* PCI probe callback: bring up one adapter instance.
 * Enables the device, maps BARs, allocates driver state, runs the
 * firmware setup (be_setup) and registers the netdev. Errors unwind
 * through the goto chain in reverse order of acquisition.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	struct be_adapter *adapter;
	struct net_device *netdev;
	int status = 0;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter state lives in the netdev's private area */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit masks if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: failure to enable it is not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_map_pci_bars(adapter);
	if (status)
		goto free_netdev;

	status = be_drv_init(adapter);
	if (status)
		goto unmap_bars;

	status = be_setup(adapter);
	if (status)
		goto drv_cleanup;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	be_schedule_err_detection(adapter);

	/* On Die temperature not supported for VF. */
	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
		adapter->hwmon_info.hwmon_dev =
			devm_hwmon_device_register_with_groups(&pdev->dev,
							       DRV_NAME,
							       adapter,
							       be_hwmon_groups);
		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
	}

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), adapter->port_name);

	return 0;

unsetup:
	be_clear(adapter);
drv_cleanup:
	be_drv_cleanup(adapter);
unmap_bars:
	be_unmap_pci_bars(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5839
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce the
 * adapter (interrupts, err-detection worker, queues via be_cleanup)
 * and put the PCI device into the requested low-power state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	be_cancel_err_detection(adapter);

	be_cleanup(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5857
/* Legacy PM resume callback: re-enable the PCI device, restore its
 * config space, re-initialize the adapter (be_resume) and restart the
 * error-detection worker. Disarms WoL that be_suspend() armed.
 */
static int be_pci_resume(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status = 0;

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_resume(adapter);
	if (status)
		return status;

	be_schedule_err_detection(adapter);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5881
Sathya Perla82456b02010-02-17 01:35:37 +00005882/*
5883 * An FLR will stop BE from DMAing any data.
5884 */
5885static void be_shutdown(struct pci_dev *pdev)
5886{
5887 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005888
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005889 if (!adapter)
5890 return;
Sathya Perla82456b02010-02-17 01:35:37 +00005891
Devesh Sharmad114f992014-06-10 19:32:15 +05305892 be_roce_dev_shutdown(adapter);
Sathya Perla0f4a6822011-03-21 20:49:28 +00005893 cancel_delayed_work_sync(&adapter->work);
Sathya Perlaeb7dd462015-02-23 04:20:11 -05005894 be_cancel_err_detection(adapter);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005895
Ajit Khaparde2d5d4152011-04-06 05:53:13 +00005896 netif_device_detach(adapter->netdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005897
Ajit Khaparde57841862011-04-06 18:08:43 +00005898 be_cmd_reset_function(adapter);
5899
Sathya Perla82456b02010-02-17 01:35:37 +00005900 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00005901}
5902
/* EEH/AER error_detected callback: mark the adapter errored, quiesce
 * it, and tell the PCI core whether a slot reset should be attempted.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* quiesce only once even if error_detected fires repeatedly */
	if (!be_check_error(adapter, BE_ERROR_EEH)) {
		be_set_error(adapter, BE_ERROR_EEH);

		be_cancel_err_detection(adapter);

		be_cleanup(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5934
/* EEH/AER slot_reset callback: re-enable the device after the slot
 * reset, wait for firmware readiness, and clear the driver's error
 * state so traffic can be restored in be_eeh_resume().
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_error(adapter, BE_CLEAR_ALL);
	return PCI_ERS_RESULT_RECOVERED;
}
5961
/* EEH/AER resume callback: final recovery step after a successful
 * slot reset — re-run adapter initialization and restart the
 * error-detection worker.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_resume(adapter);
	if (status)
		goto err;

	be_schedule_err_detection(adapter);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5980
/* sysfs sriov_configure hook: enable num_vfs VFs, or disable all VFs
 * when num_vfs is 0. Returns the number of VFs enabled on success or
 * a negative errno.
 */
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	u16 num_vf_qs;
	int status;

	/* NOTE(review): be_vf_clear() runs before the "VFs assigned"
	 * -EBUSY check below — confirm be_vf_clear() itself is safe to
	 * call while VFs are assigned to guests.
	 */
	if (!num_vfs)
		be_vf_clear(adapter);

	adapter->num_vfs = num_vfs;

	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "Cannot disable VFs while they are assigned\n");
		return -EBUSY;
	}

	/* When the HW is in SRIOV capable configuration, the PF-pool resources
	 * are equally distributed across the max-number of VFs. The user may
	 * request only a subset of the max-vfs to be enabled.
	 * Based on num_vfs, redistribute the resources across num_vfs so that
	 * each VF will have access to more number of resources.
	 * This facility is not available in BE3 FW.
	 * Also, this is done by FW in Lancer chip.
	 */
	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
		num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
						 adapter->num_vfs, num_vf_qs);
		if (status)
			dev_err(&pdev->dev,
				"Failed to optimize SR-IOV resources\n");
	}

	status = be_get_resources(adapter);
	if (status)
		return be_cmd_status(status);

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_update_queues(adapter);
	rtnl_unlock();
	if (status)
		return be_cmd_status(status);

	if (adapter->num_vfs)
		status = be_vf_setup(adapter);

	if (!status)
		return adapter->num_vfs;

	return 0;
}
6034
/* PCI EEH/AER error-recovery callbacks (see be_eeh_* above) */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
6040
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006041static struct pci_driver be_driver = {
6042 .name = DRV_NAME,
6043 .id_table = be_dev_ids,
6044 .probe = be_probe,
6045 .remove = be_remove,
6046 .suspend = be_suspend,
Kalesh AP484d76f2015-02-23 04:20:14 -05006047 .resume = be_pci_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00006048 .shutdown = be_shutdown,
Vasundhara Volamace40af2015-03-04 00:44:34 -05006049 .sriov_configure = be_pci_sriov_configure,
Sathya Perlacf588472010-02-14 21:22:01 +00006050 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006051};
6052
6053static int __init be_init_module(void)
6054{
Joe Perches8e95a202009-12-03 07:58:21 +00006055 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6056 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006057 printk(KERN_WARNING DRV_NAME
6058 " : Module param rx_frag_size must be 2048/4096/8192."
6059 " Using 2048\n");
6060 rx_frag_size = 2048;
6061 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006062
Vasundhara Volamace40af2015-03-04 00:44:34 -05006063 if (num_vfs > 0) {
6064 pr_info(DRV_NAME " : Module param num_vfs is obsolete.");
6065 pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6066 }
6067
Sathya Perla6b7c5b92009-03-11 23:32:03 -07006068 return pci_register_driver(&be_driver);
6069}
6070module_init(be_init_module);
6071
/* Module exit point: unregister the PCI driver (triggers be_remove()
 * for every bound device).
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);