blob: d02fbc7694ea73ba8f958ee5a00807d1c663b15f [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Vasundhara Volam40263822014-02-12 16:09:07 +05302 * Copyright (C) 2005 - 2014 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the bits of the UE (Unrecoverable Error)
 * status low register. Presumably indexed by bit position and used when
 * decoding/logging hardware errors elsewhere in this file — the consumer
 * is not visible in this chunk. Trailing spaces in some entries are
 * preserved as-is; they appear verbatim in log output.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR */
/* Human-readable names for the bits of the UE (Unrecoverable Error)
 * status high register; companion table to ue_status_low_desc above.
 * The final "Unknown" entry presumably acts as a catch-all for bit
 * positions beyond the named ones — TODO confirm against the decoder.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa82013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
/* ndo_set_mac_address handler. Programs the new MAC via FW commands and
 * updates netdev->dev_addr only after the FW confirms the new MAC is the
 * active one. Returns 0 on success, -EADDRNOTAVAIL for an invalid
 * address, -EPERM if the FW did not actually activate the new MAC, or a
 * FW-command error code.
 */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	struct sockaddr *addr = p;
	int status;
	u8 mac[ETH_ALEN];
	u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* Proceed further only if the user-provided MAC is different
	 * from the currently active MAC.
	 */
	if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
		return 0;

	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
	 * privilege or if the PF did not provision the new MAC address.
	 * On BE3, this cmd will always fail if the VF doesn't have the
	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
	 * the MAC for the VF.
	 */
	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				 adapter->if_handle, &adapter->pmac_id[0], 0);
	if (!status) {
		curr_pmac_id = adapter->pmac_id[0];

		/* Delete the old programmed MAC. This call may fail if the
		 * old MAC was already deleted by the PF driver.
		 */
		if (adapter->pmac_id[0] != old_pmac_id)
			be_cmd_pmac_del(adapter, adapter->if_handle,
					old_pmac_id, 0);
	}

	/* Decide if the new MAC is successfully activated only after
	 * querying the FW for the currently active MAC.
	 */
	status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
				       adapter->if_handle, true, 0);
	if (status)
		goto err;

	/* The MAC change did not happen, either due to lack of privilege
	 * or because the PF didn't pre-provision it.
	 */
	if (!ether_addr_equal(addr->sa_data, mac)) {
		status = -EPERM;
		goto err;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	dev_info(dev, "MAC address changed to %pM\n", mac);
	return 0;
err:
	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
	return status;
}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
/* Copy the v0 (BE2) hardware stats from the FW response buffer into the
 * driver's unified be_drv_stats, byte-swapping the whole block from LE
 * to CPU order first.
 */
static void populate_be_v0_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v0 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	/* v0 reports address and VLAN filtering separately; the driver
	 * exposes them as one combined counter.
	 */
	drvs->rx_address_filtered =
		port_stats->rx_address_filtered +
		port_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* v0 keeps jabber events per physical port in the rxf block */
	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
393
/* Copy the v1 (BE3) hardware stats from the FW response buffer into the
 * driver's unified be_drv_stats, byte-swapping the whole block from LE
 * to CPU order first. Unlike v0, v1 reports per-port jabber events and
 * a single pre-combined rx_address_filtered counter.
 */
static void populate_be_v1_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v1 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
439
/* Copy the v2 hardware stats from the FW response buffer into the
 * driver's unified be_drv_stats, byte-swapping the whole block from LE
 * to CPU order first. v2 additionally carries RoCE counters, copied only
 * when the adapter supports RoCE.
 */
static void populate_be_v2_stats(struct be_adapter *adapter)
{
	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats_v2 *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct be_drv_stats *drvs = &adapter->drv_stats;

	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_filtered = port_stats->rx_address_filtered;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
	if (be_roce_supported(adapter)) {
		/* RoCE counters exist only in the v2 layout */
		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
		drvs->rx_roce_frames = port_stats->roce_frames_received;
		drvs->roce_drops_crc = port_stats->roce_drops_crc;
		drvs->roce_drops_payload_len =
			port_stats->roce_drops_payload_len;
	}
}
493
/* Copy Lancer pport stats from the FW response buffer into the driver's
 * unified be_drv_stats, byte-swapping the whole block from LE to CPU
 * order first. Lancer uses a different (pport) stats layout from the
 * BEx/Skyhawk versioned layouts; "_lo" fields are the low 32 bits of
 * 64-bit HW counters.
 */
static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	/* Lancer reports address and VLAN filtering separately; the
	 * driver exposes them as one combined counter.
	 */
	drvs->rx_address_filtered =
		pport_stats->rx_address_filtered +
		pport_stats->rx_vlan_filtered;
	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->forwarded_packets = pport_stats->num_forwards_lo;
	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
	drvs->rx_drops_too_many_frags =
		pport_stats->rx_drops_too_many_frags_lo;
}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
/* Parse the FW stats response into adapter->drv_stats, dispatching on
 * chip family (Lancer vs. versioned BEx/Skyhawk layouts), then update
 * the per-RX-queue erx drop counters for non-Lancer chips.
 */
void be_parse_stats(struct be_adapter *adapter)
{
	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
	struct be_rx_obj *rxo;
	int i;
	u32 erx_stat;

	if (lancer_chip(adapter)) {
		populate_lancer_stats(adapter);
	} else {
		if (BE2_chip(adapter))
			populate_be_v0_stats(adapter);
		else if (BE3_chip(adapter))
			/* for BE3 */
			populate_be_v1_stats(adapter);
		else
			populate_be_v2_stats(adapter);

		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
		for_all_rx_queues(adapter, rxo, i) {
			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
			populate_erx_stats(adapter, rxo, erx_stat);
		}
	}
}
581
/* ndo_get_stats64() handler: aggregates the per-RX/TX-queue SW counters
 * and the driver-maintained error counters (adapter->drv_stats) into
 * @stats and returns @stats.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* seqcount retry loop: read pkts/bytes as one consistent
		 * snapshot even while the datapath is updating them
		 */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		/* same consistent-snapshot loop for the TX counters */
		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla3c8def92011-06-12 20:01:58 +0000665static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530666 u32 wrb_cnt, u32 copied, u32 gso_segs,
667 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668{
Sathya Perla3c8def92011-06-12 20:01:58 +0000669 struct be_tx_stats *stats = tx_stats(txo);
670
Sathya Perlaab1594e2011-07-25 19:10:15 +0000671 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt;
674 stats->tx_bytes += copied;
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000677 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000678 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
681/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000682static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530683 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700685 int cnt = (skb->len > skb->data_len);
686
687 cnt += skb_shinfo(skb)->nr_frags;
688
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* to account for hdr wrb */
690 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000697 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
704 wrb->frag_pa_hi = upper_32_bits(addr);
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000707 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708}
709
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530711 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000712{
713 u8 vlan_prio;
714 u16 vlan_tag;
715
716 vlan_tag = vlan_tx_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
720 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
721 adapter->recommended_prio;
722
723 return vlan_tag;
724}
725
Sathya Perlac9c47142014-03-27 10:46:19 +0530726/* Used only for IP tunnel packets */
727static u16 skb_inner_ip_proto(struct sk_buff *skb)
728{
729 return (inner_ip_hdr(skb)->version == 4) ?
730 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
731}
732
733static u16 skb_ip_proto(struct sk_buff *skb)
734{
735 return (ip_hdr(skb)->version == 4) ?
736 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
737}
738
/* Fill the header WRB that precedes the data WRBs of a TX request:
 * checksum-offload, LSO, VLAN and length/WRB-count fields.
 * @wrb_cnt: total WRBs of the request (header + data + optional dummy)
 * @len: total number of data bytes mapped
 * @skip_hw_vlan: when true, instruct HW not to insert a VLAN tag
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* the separate lso6 bit is not set on Lancer chips */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* for tunnelled pkts the csum request refers to inner hdrs */
		if (skb->encapsulation) {
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}
779
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000780static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530781 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000782{
783 dma_addr_t dma;
784
785 be_dws_le_to_cpu(wrb, sizeof(*wrb));
786
787 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000788 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000789 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000790 dma_unmap_single(dev, dma, wrb->frag_len,
791 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000792 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000793 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000794 }
795}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700796
/* DMA-map the skb (linear part + page frags) and post the header WRB,
 * data WRBs and optional dummy WRB into @txq.
 * Returns the number of data bytes mapped, or 0 if a DMA mapping failed
 * (in which case all mappings are undone and txq->head is rolled back).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the first slot for the header WRB; it is filled last,
	 * once the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data-WRB slot, for error unwind */

	/* linear (head) portion of the skb, if non-empty */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length pad WRB to make the WRB count even (see
	 * wrb_cnt_for_skb())
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* walk the already-posted data WRBs again, unmapping each;
	 * only the first one may have been a single (non-page) mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}
865
/* Insert the VLAN tag(s) into the packet data itself, in SW, instead of
 * relying on HW tagging. Sets *skip_hw_vlan (when the pointer is
 * non-NULL) if the WRB must tell HW to skip its own VLAN insertion.
 * May re-allocate the skb; returns NULL on allocation failure (the
 * caller is responsible for accounting the drop).
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* QnQ with a pvid: tag untagged pkts with the pvid */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now in the payload; clear the out-of-band one */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
910
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000911static bool be_ipv6_exthdr_check(struct sk_buff *skb)
912{
913 struct ethhdr *eh = (struct ethhdr *)skb->data;
914 u16 offset = ETH_HLEN;
915
916 if (eh->h_proto == htons(ETH_P_IPV6)) {
917 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
918
919 offset += sizeof(struct ipv6hdr);
920 if (ip6h->nexthdr != NEXTHDR_TCP &&
921 ip6h->nexthdr != NEXTHDR_UDP) {
922 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530923 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000924
925 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
926 if (ehdr->hdrlen == 0xff)
927 return true;
928 }
929 }
930 return false;
931}
932
933static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
934{
935 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
936}
937
Sathya Perla748b5392014-05-09 13:29:13 +0530938static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000939{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000940 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000941}
942
/* Apply BEx/Lancer HW-bug workarounds to @skb before it is handed to HW:
 * trim HW-added padding, force SW VLAN insertion where HW tagging is
 * broken, and drop pkts that could lock up the ASIC.
 * Returns the (possibly re-allocated) skb, or NULL if the pkt was
 * dropped or an allocation failed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * So trim such pkts back to their IP tot_len.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1010
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301011static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1012 struct sk_buff *skb,
1013 bool *skip_hw_vlan)
1014{
1015 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1016 * less may cause a transmit stall on that port. So the work-around is
1017 * to pad short packets (<= 32 bytes) to a 36-byte length.
1018 */
1019 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1020 if (skb_padto(skb, 36))
1021 return NULL;
1022 skb->len = 36;
1023 }
1024
1025 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1026 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1027 if (!skb)
1028 return NULL;
1029 }
1030
1031 return skb;
1032}
1033
/* ndo_start_xmit handler: applies SW workarounds, posts the WRBs for the
 * skb and rings the TX doorbell. Always returns NETDEV_TX_OK; skbs that
 * cannot be sent are freed and counted in tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	/* head index before this xmit: used as the sent_skb slot and to
	 * roll the queue back on failure
	 */
	u32 start = txq->head;

	/* may drop (returns NULL) or re-allocate the skb */
	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	/* copied == 0 indicates a DMA mapping failure in make_tx_wrbs() */
	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* mapping failed: make_tx_wrbs() already unmapped; undo the
		 * head advance and drop the skb
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1082
1083static int be_change_mtu(struct net_device *netdev, int new_mtu)
1084{
1085 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301086 struct device *dev = &adapter->pdev->dev;
1087
1088 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1089 dev_info(dev, "MTU must be between %d and %d bytes\n",
1090 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001091 return -EINVAL;
1092 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301093
1094 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301095 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001096 netdev->mtu = new_mtu;
1097 return 0;
1098}
1099
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * Programs the VIDs set in adapter->vids into the HW filter and
 * enters/leaves VLAN-promiscuous mode as needed. Returns 0 on success
 * or the firmware command status.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more VLANs than the HW filter can hold: go VLAN-promiscuous */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already VLAN-promiscuous: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1156
Patrick McHardy80d5c362013-04-19 02:04:28 +00001157static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001158{
1159 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001160 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001161
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001162 /* Packets with VID 0 are always received by Lancer by default */
1163 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301164 return status;
1165
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301166 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301167 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001168
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301169 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301170 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001171
Somnath Kotura6b74e02014-01-21 15:50:55 +05301172 status = be_vid_config(adapter);
1173 if (status) {
1174 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301175 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301176 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301177
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001178 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001179}
1180
Patrick McHardy80d5c362013-04-19 02:04:28 +00001181static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001182{
1183 struct be_adapter *adapter = netdev_priv(netdev);
1184
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001185 /* Packets with VID 0 are always received by Lancer by default */
1186 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301187 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001188
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301189 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301190 adapter->vlans_added--;
1191
1192 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001193}
1194
Somnath kotur7ad09452014-03-03 14:24:43 +05301195static void be_clear_promisc(struct be_adapter *adapter)
1196{
1197 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301198 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301199
1200 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1201}
1202
/* ndo_set_rx_mode handler: syncs the HW RX filters (promiscuous state,
 * unicast MAC list, multicast list) with the netdev flags and address
 * lists, falling back to (mcast-)promiscuous mode when HW limits are
 * exceeded or programming fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* re-program the VLAN filters that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* delete all previously programmed secondary UC MACs */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC addresses to filter: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		/* re-program the current UC address list */
		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed OK: mcast-promisc no longer needed */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1269
/* ndo_set_vf_mac handler: program @mac as the MAC address of VF @vf.
 * Returns 0 on success (or if @mac is already the active address),
 * -EPERM when SR-IOV is disabled, -EINVAL on a bad address or VF index,
 * or a translated firmware error code.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* BEx has no single "set MAC" op: delete the old pmac entry
		 * and add the new one on the VF's interface
		 */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		/* Lancer/Skyhawk support a direct set-MAC command */
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new active MAC only after FW accepted it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1309
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001310static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301311 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001312{
1313 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001314 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001315
Sathya Perla11ac75e2011-12-13 00:58:50 +00001316 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001317 return -EPERM;
1318
Sathya Perla11ac75e2011-12-13 00:58:50 +00001319 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001320 return -EINVAL;
1321
1322 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001323 vi->max_tx_rate = vf_cfg->tx_rate;
1324 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001325 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1326 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001327 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301328 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001329
1330 return 0;
1331}
1332
Sathya Perla748b5392014-05-09 13:29:13 +05301333static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001334{
1335 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001336 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001337 int status = 0;
1338
Sathya Perla11ac75e2011-12-13 00:58:50 +00001339 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001340 return -EPERM;
1341
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001342 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001343 return -EINVAL;
1344
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001345 if (vlan || qos) {
1346 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301347 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001348 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1349 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001350 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001351 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301352 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1353 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001354 }
1355
Kalesh APabccf232014-07-17 16:20:24 +05301356 if (status) {
1357 dev_err(&adapter->pdev->dev,
1358 "VLAN %d config on VF %d failed : %#x\n", vlan,
1359 vf, status);
1360 return be_cmd_status(status);
1361 }
1362
1363 vf_cfg->vlan_tag = vlan;
1364
1365 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001366}
1367
/* ndo_set_vf_rate handler: apply a TX rate limit of @max_tx_rate Mbps to
 * VF @vf (0 clears the limit). @min_tx_rate is not supported and must be 0.
 * A non-zero rate is validated against the current link speed, and on
 * Skyhawk must be an exact percentage of it. Returns 0 on success or a
 * negative errno / translated firmware error.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* min rate is not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* rate of 0 disables the limit; no validation needed */
	if (!max_tx_rate)
		goto config_qos;

	/* link_speed is needed both for validation and the FW command */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* cache the limit so be_get_vf_config() can report it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301429
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301430static int be_set_vf_link_state(struct net_device *netdev, int vf,
1431 int link_state)
1432{
1433 struct be_adapter *adapter = netdev_priv(netdev);
1434 int status;
1435
1436 if (!sriov_enabled(adapter))
1437 return -EPERM;
1438
1439 if (vf >= adapter->num_vfs)
1440 return -EINVAL;
1441
1442 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301443 if (status) {
1444 dev_err(&adapter->pdev->dev,
1445 "Link state change on VF %d failed: %#x\n", vf, status);
1446 return be_cmd_status(status);
1447 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301448
Kalesh APabccf232014-07-17 16:20:24 +05301449 adapter->vf_cfg[vf].plink_tracking = link_state;
1450
1451 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301452}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001453
Sathya Perla2632baf2013-10-01 16:00:00 +05301454static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1455 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001456{
Sathya Perla2632baf2013-10-01 16:00:00 +05301457 aic->rx_pkts_prev = rx_pkts;
1458 aic->tx_reqs_prev = tx_pkts;
1459 aic->jiffies = now;
1460}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001461
Sathya Perla2632baf2013-10-01 16:00:00 +05301462static void be_eqd_update(struct be_adapter *adapter)
1463{
1464 struct be_set_eqd set_eqd[MAX_EVT_QS];
1465 int eqd, i, num = 0, start;
1466 struct be_aic_obj *aic;
1467 struct be_eq_obj *eqo;
1468 struct be_rx_obj *rxo;
1469 struct be_tx_obj *txo;
1470 u64 rx_pkts, tx_pkts;
1471 ulong now;
1472 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001473
Sathya Perla2632baf2013-10-01 16:00:00 +05301474 for_all_evt_queues(adapter, eqo, i) {
1475 aic = &adapter->aic_obj[eqo->idx];
1476 if (!aic->enable) {
1477 if (aic->jiffies)
1478 aic->jiffies = 0;
1479 eqd = aic->et_eqd;
1480 goto modify_eqd;
1481 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001482
Sathya Perla2632baf2013-10-01 16:00:00 +05301483 rxo = &adapter->rx_obj[eqo->idx];
1484 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001485 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301486 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001487 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001488
Sathya Perla2632baf2013-10-01 16:00:00 +05301489 txo = &adapter->tx_obj[eqo->idx];
1490 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001491 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301492 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001493 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001494
Sathya Perla2632baf2013-10-01 16:00:00 +05301495 /* Skip, if wrapped around or first calculation */
1496 now = jiffies;
1497 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1498 rx_pkts < aic->rx_pkts_prev ||
1499 tx_pkts < aic->tx_reqs_prev) {
1500 be_aic_update(aic, rx_pkts, tx_pkts, now);
1501 continue;
1502 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001503
Sathya Perla2632baf2013-10-01 16:00:00 +05301504 delta = jiffies_to_msecs(now - aic->jiffies);
1505 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1506 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1507 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001508
Sathya Perla2632baf2013-10-01 16:00:00 +05301509 if (eqd < 8)
1510 eqd = 0;
1511 eqd = min_t(u32, eqd, aic->max_eqd);
1512 eqd = max_t(u32, eqd, aic->min_eqd);
1513
1514 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001515modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301516 if (eqd != aic->prev_eqd) {
1517 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1518 set_eqd[num].eq_id = eqo->q.id;
1519 aic->prev_eqd = eqd;
1520 num++;
1521 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001522 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301523
1524 if (num)
1525 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001526}
1527
Sathya Perla3abcded2010-10-03 22:12:27 -07001528static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301529 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001530{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001531 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001532
Sathya Perlaab1594e2011-07-25 19:10:15 +00001533 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001534 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001535 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001536 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001537 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001538 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001539 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001540 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001541 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001542}
1543
Sathya Perla2e588f82011-03-11 02:49:26 +00001544static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001545{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001546 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301547 * Also ignore ipcksm for ipv6 pkts
1548 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001549 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301550 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001551}
1552
/* Consume one RX fragment descriptor from the tail of @rxo's queue and
 * return its page-info entry, making the buffer CPU-visible first.
 * Two half-page fragments share one DMA mapping: only the last fragment
 * of a page unmaps it; earlier ones just sync their slice for the CPU.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* advance past the consumed descriptor */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1578
1579/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001580static void be_rx_compl_discard(struct be_rx_obj *rxo,
1581 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001584 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001585
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001586 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301587 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001588 put_page(page_info->page);
1589 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001590 }
1591}
1592
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment supplies the linear header
 * (tiny packets are copied entirely), the rest become page frags,
 * with consecutive frags from the same physical page coalesced.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Only the Ethernet header goes in the linear area;
		 * the remainder of the first fragment stays as frag[0]
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as the previous frag: drop the extra ref */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1667
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX fragments, apply checksum/hash/
 * VLAN metadata, and hand it to the stack. Frees the fragments and
 * counts a drop if the skb allocation fails.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* for tunneled pkts, the inner csum was validated too */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1703
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX fragments directly to napi's frag skb (coalescing frags
 * that share a physical page), apply hash/VLAN metadata, and pass the
 * skb to the GRO engine.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* same page as previous frag: drop the extra ref */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	/* GRO path is only taken when HW already validated the csum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* for tunneled pkts, the inner csum was validated too */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1761
/* Decode a v1-format (BE3-native) RX completion descriptor into the
 * driver's chip-independent rxcp structure. VLAN fields are only valid
 * when the vtp bit is set.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	/* tunneled bit exists only in the v1 completion format */
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001784
/* Decode a v0-format (legacy) RX completion descriptor into the driver's
 * chip-independent rxcp structure. VLAN fields are only valid when the
 * vtp bit is set; v0 reports ip_frag instead of the v1 tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1806
/* Fetch the next RX completion from @rxo's completion queue, or NULL if
 * none is pending. Parses the HW descriptor (v0 or v1 format) into
 * rxo->rxcp, applies VLAN fixups, and invalidates the descriptor so the
 * slot can be reused.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* ensure the rest of the descriptor is read only after the valid
	 * bit has been observed set
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* HW cannot validate L4 csum across IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* hide the pvid tag from the stack unless the host has
		 * explicitly configured that vlan
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1851
Eric Dumazet1829b082011-03-01 05:48:12 +00001852static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001855
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001856 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001857 gfp |= __GFP_COMP;
1858 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001859}
1860
/*
 * Allocate big pages, split each into fragments of size rx_frag_size and
 * post up to @frags_needed of them as receive buffers to BE. Stops early
 * on allocation/mapping failure or when the RXQ ring is full; marks the
 * rxo as "post starved" when nothing could be posted and the ring is empty.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* A non-NULL page_info->page means the slot is still owned by HW */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page: allocate and DMA-map it */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Another frag from the current big page; each frag
			 * holds its own reference on the compound page
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's DMA address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			/* last_frag carries the page-level DMA address used
			 * later to unmap the whole big page
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks of at most 256 buffers */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1943
Sathya Perla5fb379e2009-06-18 00:02:59 +00001944static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001946 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1947
1948 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1949 return NULL;
1950
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001951 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001952 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1953
1954 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1955
1956 queue_tail_inc(tx_cq);
1957 return txcp;
1958}
1959
/* Unmap and free the transmitted skb whose wrbs in txo->q end at
 * @last_index (taken from the TX completion). The header wrb at the
 * queue tail is skipped for unmapping since it shares the skb-head
 * mapping with the first frag wrb. Returns the total number of wrbs
 * consumed, including the header wrb.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* unmap the skb-head only once (first data wrb) */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1991
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001992/* Return the number of events in the event queue */
1993static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001994{
1995 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001996 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001997
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001998 do {
1999 eqe = queue_tail_node(&eqo->q);
2000 if (eqe->evt == 0)
2001 break;
2002
2003 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002004 eqe->evt = 0;
2005 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002006 queue_tail_inc(&eqo->q);
2007 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002008
2009 return num;
2010}
2011
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002012/* Leaves the EQ is disarmed state */
2013static void be_eq_clean(struct be_eq_obj *eqo)
2014{
2015 int num = events_get(eqo);
2016
2017 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2018}
2019
/* Drain rxo's RX CQ — on non-Lancer chips waiting (bounded) for the HW
 * flush completion — then release every RX buffer still posted to the
 * RXQ. Leaves the CQ unarmed and the RXQ empty with head/tail reset.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2069
/* Poll every TX CQ until the HW has been silent for ~10ms (or a HW
 * error is detected), reaping any completions that arrive; then
 * forcibly unmap and free any posted skbs whose completions will
 * never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* HW still active: restart silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of the skb at the tail so
			 * be_tx_compl_process() can walk/unmap it
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2127
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002128static void be_evt_queues_destroy(struct be_adapter *adapter)
2129{
2130 struct be_eq_obj *eqo;
2131 int i;
2132
2133 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002134 if (eqo->q.created) {
2135 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002136 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302137 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302138 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002139 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002140 be_queue_free(adapter, &eqo->q);
2141 }
2142}
2143
/* Allocate and create the event queues — one per IRQ vector, capped by
 * the configured queue count — registering a NAPI context and default
 * adaptive interrupt-coalescing (AIC) settings for each. Returns 0 on
 * success or the first failing step's status; partially-created EQs
 * are not cleaned up here.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2176
Sathya Perla5fb379e2009-06-18 00:02:59 +00002177static void be_mcc_queues_destroy(struct be_adapter *adapter)
2178{
2179 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002180
Sathya Perla8788fdc2009-07-27 22:52:03 +00002181 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002182 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002183 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002184 be_queue_free(adapter, q);
2185
Sathya Perla8788fdc2009-07-27 22:52:03 +00002186 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002187 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002188 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002189 be_queue_free(adapter, q);
2190}
2191
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Creates the MCC completion queue (on the default EQ) and then the
 * MCC WRB queue. Returns 0 on success, -1 on any failure; partially
 * created resources are unwound via the goto-cleanup chain below.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of setup */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2224
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002225static void be_tx_queues_destroy(struct be_adapter *adapter)
2226{
2227 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002228 struct be_tx_obj *txo;
2229 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002230
Sathya Perla3c8def92011-06-12 20:01:58 +00002231 for_all_tx_queues(adapter, txo, i) {
2232 q = &txo->q;
2233 if (q->created)
2234 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2235 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002236
Sathya Perla3c8def92011-06-12 20:01:58 +00002237 q = &txo->cq;
2238 if (q->created)
2239 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2240 be_queue_free(adapter, q);
2241 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002242}
2243
/* Create the TX queues — one per EQ, capped by the HW's max TXQs.
 * Each TXQ gets its own completion queue; when there are fewer EQs
 * than TXQs, multiple TX CQs share an EQ. Returns 0 on success or the
 * first failing step's status (no unwinding done here).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2284
2285static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002286{
2287 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002288 struct be_rx_obj *rxo;
2289 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002290
Sathya Perla3abcded2010-10-03 22:12:27 -07002291 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002292 q = &rxo->cq;
2293 if (q->created)
2294 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2295 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002296 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002297}
2298
/* Size the RX queue set (RSS rings plus, when RSS is usable, one
 * default RXQ for non-IP traffic) and create a completion queue for
 * each RX ring. Returns 0 on success or the first failing step's
 * status (no unwinding done here).
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* Multiple RX CQs may share an EQ (round-robin mapping) */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2335
/* INTx interrupt handler: schedule NAPI (if not already scheduled) and
 * notify the EQ of the events consumed. Claims the interrupt only when
 * events were seen or for the first spurious interrupt after a valid
 * one, so genuinely stuck lines still get flagged by the kernel.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2367
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002368static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002369{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002370 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002371
Sathya Perla0b545a62012-11-23 00:27:18 +00002372 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2373 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002374 return IRQ_HANDLED;
2375}
2376
Sathya Perla2e588f82011-03-11 02:49:26 +00002377static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002378{
Somnath Koture38b1702013-05-29 22:55:56 +00002379 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002380}
2381
/* Process up to @budget RX completions from rxo's CQ, passing good
 * packets to the stack (via GRO when eligible and not busy-polling)
 * and discarding flush/partial/mis-filtered completions. Replenishes
 * RX frags when the queue runs low. Returns the number of completions
 * processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2441
Kalesh AP512bb8a2014-09-02 09:56:49 +05302442static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2443{
2444 switch (status) {
2445 case BE_TX_COMP_HDR_PARSE_ERR:
2446 tx_stats(txo)->tx_hdr_parse_err++;
2447 break;
2448 case BE_TX_COMP_NDMA_ERR:
2449 tx_stats(txo)->tx_dma_err++;
2450 break;
2451 case BE_TX_COMP_ACL_ERR:
2452 tx_stats(txo)->tx_spoof_check_err++;
2453 break;
2454 }
2455}
2456
2457static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2458{
2459 switch (status) {
2460 case LANCER_TX_COMP_LSO_ERR:
2461 tx_stats(txo)->tx_tso_err++;
2462 break;
2463 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2464 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2465 tx_stats(txo)->tx_spoof_check_err++;
2466 break;
2467 case LANCER_TX_COMP_QINQ_ERR:
2468 tx_stats(txo)->tx_qinq_err++;
2469 break;
2470 case LANCER_TX_COMP_PARITY_ERR:
2471 tx_stats(txo)->tx_internal_parity_err++;
2472 break;
2473 case LANCER_TX_COMP_DMA_ERR:
2474 tx_stats(txo)->tx_dma_err++;
2475 break;
2476 }
2477}
2478
/* Reap all pending completions on txo's CQ: free the transmitted skbs,
 * account HW-reported TX errors (Lancer vs. other chips use different
 * status codes), and wake netdev subqueue @idx if it was stopped for
 * lack of wrbs and the queue is now less than half full.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002517
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302518int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002519{
2520 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2521 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002522 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302523 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302524 struct be_tx_obj *txo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002525
Sathya Perla0b545a62012-11-23 00:27:18 +00002526 num_evts = events_get(eqo);
2527
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302528 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2529 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002530
Sathya Perla6384a4d2013-10-25 10:40:16 +05302531 if (be_lock_napi(eqo)) {
2532 /* This loop will iterate twice for EQ0 in which
2533 * completions of the last RXQ (default one) are also processed
2534 * For other EQs the loop iterates only once
2535 */
2536 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2537 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2538 max_work = max(work, max_work);
2539 }
2540 be_unlock_napi(eqo);
2541 } else {
2542 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002543 }
2544
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002545 if (is_mcc_eqo(eqo))
2546 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002547
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002548 if (max_work < budget) {
2549 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002550 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002551 } else {
2552 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002553 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002554 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002555 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002556}
2557
Sathya Perla6384a4d2013-10-25 10:40:16 +05302558#ifdef CONFIG_NET_RX_BUSY_POLL
2559static int be_busy_poll(struct napi_struct *napi)
2560{
2561 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2562 struct be_adapter *adapter = eqo->adapter;
2563 struct be_rx_obj *rxo;
2564 int i, work = 0;
2565
2566 if (!be_lock_busy_poll(eqo))
2567 return LL_FLUSH_BUSY;
2568
2569 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2570 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2571 if (work)
2572 break;
2573 }
2574
2575 be_unlock_busy_poll(eqo);
2576 return work;
2577}
2578#endif
2579
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002580void be_detect_error(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002581{
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002582 u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2583 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002584 u32 i;
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302585 bool error_detected = false;
2586 struct device *dev = &adapter->pdev->dev;
2587 struct net_device *netdev = adapter->netdev;
Ajit Khaparde7c185272010-07-29 06:16:33 +00002588
Sathya Perlad23e9462012-12-17 19:38:51 +00002589 if (be_hw_error(adapter))
Sathya Perla72f02482011-11-10 19:17:58 +00002590 return;
2591
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002592 if (lancer_chip(adapter)) {
2593 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2594 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2595 sliport_err1 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302596 SLIPORT_ERROR1_OFFSET);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002597 sliport_err2 = ioread32(adapter->db +
Sathya Perla748b5392014-05-09 13:29:13 +05302598 SLIPORT_ERROR2_OFFSET);
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302599 adapter->hw_error = true;
2600 /* Do not log error messages if its a FW reset */
2601 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2602 sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2603 dev_info(dev, "Firmware update in progress\n");
2604 } else {
2605 error_detected = true;
2606 dev_err(dev, "Error detected in the card\n");
2607 dev_err(dev, "ERR: sliport status 0x%x\n",
2608 sliport_status);
2609 dev_err(dev, "ERR: sliport error1 0x%x\n",
2610 sliport_err1);
2611 dev_err(dev, "ERR: sliport error2 0x%x\n",
2612 sliport_err2);
2613 }
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002614 }
2615 } else {
2616 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302617 PCICFG_UE_STATUS_LOW, &ue_lo);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002618 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302619 PCICFG_UE_STATUS_HIGH, &ue_hi);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002620 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302621 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
Padmanabh Ratnakare1cfb672011-11-03 01:50:08 +00002622 pci_read_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302623 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002624
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002625 ue_lo = (ue_lo & ~ue_lo_mask);
2626 ue_hi = (ue_hi & ~ue_hi_mask);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002627
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302628 /* On certain platforms BE hardware can indicate spurious UEs.
2629 * Allow HW to stop working completely in case of a real UE.
2630 * Hence not setting the hw_error for UE detection.
2631 */
2632
2633 if (ue_lo || ue_hi) {
2634 error_detected = true;
2635 dev_err(dev,
2636 "Unrecoverable Error detected in the adapter");
2637 dev_err(dev, "Please reboot server to recover");
2638 if (skyhawk_chip(adapter))
2639 adapter->hw_error = true;
2640 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2641 if (ue_lo & 1)
2642 dev_err(dev, "UE: %s bit set\n",
2643 ue_status_low_desc[i]);
2644 }
2645 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2646 if (ue_hi & 1)
2647 dev_err(dev, "UE: %s bit set\n",
2648 ue_status_hi_desc[i]);
2649 }
Somnath Kotur4bebb562013-12-05 12:07:55 +05302650 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00002651 }
Somnath Kotureb0eecc2014-02-12 16:07:54 +05302652 if (error_detected)
2653 netif_carrier_off(netdev);
Ajit Khaparde7c185272010-07-29 06:16:33 +00002654}
2655
Sathya Perla8d56ff12009-11-22 22:02:26 +00002656static void be_msix_disable(struct be_adapter *adapter)
2657{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002658 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002659 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002660 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302661 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002662 }
2663}
2664
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002665static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002666{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002667 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002668 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002669
Sathya Perla92bf14a2013-08-27 16:57:32 +05302670 /* If RoCE is supported, program the max number of NIC vectors that
2671 * may be configured via set-channels, along with vectors needed for
2672 * RoCe. Else, just program the number we'll use initially.
2673 */
2674 if (be_roce_supported(adapter))
2675 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2676 2 * num_online_cpus());
2677 else
2678 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002679
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002680 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002681 adapter->msix_entries[i].entry = i;
2682
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002683 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2684 MIN_MSIX_VECTORS, num_vec);
2685 if (num_vec < 0)
2686 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00002687
Sathya Perla92bf14a2013-08-27 16:57:32 +05302688 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2689 adapter->num_msix_roce_vec = num_vec / 2;
2690 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2691 adapter->num_msix_roce_vec);
2692 }
2693
2694 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2695
2696 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2697 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002698 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002699
2700fail:
2701 dev_warn(dev, "MSIx enable failed\n");
2702
2703 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2704 if (!be_physfn(adapter))
2705 return num_vec;
2706 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002707}
2708
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002709static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302710 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002711{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302712 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002713}
2714
/* Request one MSI-X IRQ per event queue. On failure, frees the IRQs
 * acquired so far, logs a warning, disables MSI-X and returns the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		/* e.g. "eth0-q2": the name shown in /proc/interrupts */
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free the IRQs already acquired for queues [0, i) */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2738
2739static int be_irq_register(struct be_adapter *adapter)
2740{
2741 struct net_device *netdev = adapter->netdev;
2742 int status;
2743
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002744 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002745 status = be_msix_register(adapter);
2746 if (status == 0)
2747 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002748 /* INTx is not supported for VF */
2749 if (!be_physfn(adapter))
2750 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002751 }
2752
Sathya Perlae49cc342012-11-27 19:50:02 +00002753 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002754 netdev->irq = adapter->pdev->irq;
2755 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002756 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002757 if (status) {
2758 dev_err(&adapter->pdev->dev,
2759 "INTx request IRQ failed - err %d\n", status);
2760 return status;
2761 }
2762done:
2763 adapter->isr_registered = true;
2764 return 0;
2765}
2766
2767static void be_irq_unregister(struct be_adapter *adapter)
2768{
2769 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002770 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002771 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002772
2773 if (!adapter->isr_registered)
2774 return;
2775
2776 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002777 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002778 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002779 goto done;
2780 }
2781
2782 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002783 for_all_evt_queues(adapter, eqo, i)
2784 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002785
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002786done:
2787 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002788}
2789
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002790static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002791{
2792 struct be_queue_info *q;
2793 struct be_rx_obj *rxo;
2794 int i;
2795
2796 for_all_rx_queues(adapter, rxo, i) {
2797 q = &rxo->q;
2798 if (q->created) {
2799 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002800 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002801 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002802 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002803 }
2804}
2805
Sathya Perla889cd4b2010-05-30 23:33:45 +00002806static int be_close(struct net_device *netdev)
2807{
2808 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002809 struct be_eq_obj *eqo;
2810 int i;
Sathya Perla889cd4b2010-05-30 23:33:45 +00002811
Kalesh APe1ad8e32014-04-14 16:12:41 +05302812 /* This protection is needed as be_close() may be called even when the
2813 * adapter is in cleared state (after eeh perm failure)
2814 */
2815 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2816 return 0;
2817
Parav Pandit045508a2012-03-26 14:27:13 +00002818 be_roce_dev_close(adapter);
2819
Ivan Veceradff345c52013-11-27 08:59:32 +01002820 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2821 for_all_evt_queues(adapter, eqo, i) {
Somnath Kotur04d3d622013-05-02 03:36:55 +00002822 napi_disable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302823 be_disable_busy_poll(eqo);
2824 }
David S. Miller71237b62013-11-28 18:53:36 -05002825 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
Somnath Kotur04d3d622013-05-02 03:36:55 +00002826 }
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002827
2828 be_async_mcc_disable(adapter);
2829
2830 /* Wait for all pending tx completions to arrive so that
2831 * all tx skbs are freed.
2832 */
Sathya Perlafba87552013-05-08 02:05:50 +00002833 netif_tx_disable(netdev);
Sathya Perla6e1f9972013-08-22 12:23:41 +05302834 be_tx_compl_clean(adapter);
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002835
2836 be_rx_qs_destroy(adapter);
2837
Ajit Khaparded11a3472013-11-18 10:44:37 -06002838 for (i = 1; i < (adapter->uc_macs + 1); i++)
2839 be_cmd_pmac_del(adapter, adapter->if_handle,
2840 adapter->pmac_id[i], 0);
2841 adapter->uc_macs = 0;
2842
Sathya Perlaa323d9b2012-12-17 19:38:50 +00002843 for_all_evt_queues(adapter, eqo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002844 if (msix_enabled(adapter))
2845 synchronize_irq(be_msix_vec_get(adapter, eqo));
2846 else
2847 synchronize_irq(netdev->irq);
2848 be_eq_clean(eqo);
Padmanabh Ratnakar63fcb272011-03-07 03:09:17 +00002849 }
2850
Sathya Perla889cd4b2010-05-30 23:33:45 +00002851 be_irq_unregister(adapter);
2852
Sathya Perla482c9e72011-06-29 23:33:17 +00002853 return 0;
2854}
2855
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002856static int be_rx_qs_create(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002857{
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002858 struct rss_info *rss = &adapter->rss_info;
2859 u8 rss_key[RSS_HASH_KEY_LEN];
Sathya Perla482c9e72011-06-29 23:33:17 +00002860 struct be_rx_obj *rxo;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002861 int rc, i, j;
Sathya Perla482c9e72011-06-29 23:33:17 +00002862
2863 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002864 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2865 sizeof(struct be_eth_rx_d));
2866 if (rc)
2867 return rc;
2868 }
2869
2870 /* The FW would like the default RXQ to be created first */
2871 rxo = default_rxo(adapter);
2872 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2873 adapter->if_handle, false, &rxo->rss_id);
2874 if (rc)
2875 return rc;
2876
2877 for_all_rss_queues(adapter, rxo, i) {
Sathya Perla482c9e72011-06-29 23:33:17 +00002878 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002879 rx_frag_size, adapter->if_handle,
2880 true, &rxo->rss_id);
Sathya Perla482c9e72011-06-29 23:33:17 +00002881 if (rc)
2882 return rc;
2883 }
2884
2885 if (be_multi_rxq(adapter)) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302886 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2887 j += adapter->num_rx_qs - 1) {
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002888 for_all_rss_queues(adapter, rxo, i) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302889 if ((j + i) >= RSS_INDIR_TABLE_LEN)
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002890 break;
Venkata Duvvurue2557872014-04-21 15:38:00 +05302891 rss->rsstable[j + i] = rxo->rss_id;
2892 rss->rss_queue[j + i] = i;
Padmanabh Ratnakare9008ee2011-11-25 05:48:53 +00002893 }
2894 }
Venkata Duvvurue2557872014-04-21 15:38:00 +05302895 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2896 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
Suresh Reddy594ad542013-04-25 23:03:20 +00002897
2898 if (!BEx_chip(adapter))
Venkata Duvvurue2557872014-04-21 15:38:00 +05302899 rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2900 RSS_ENABLE_UDP_IPV6;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302901 } else {
2902 /* Disable RSS, if only default RX Q is created */
Venkata Duvvurue2557872014-04-21 15:38:00 +05302903 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302904 }
Suresh Reddy594ad542013-04-25 23:03:20 +00002905
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002906 netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
Sathya Perla748b5392014-05-09 13:29:13 +05302907 rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002908 128, rss_key);
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302909 if (rc) {
Venkata Duvvurue2557872014-04-21 15:38:00 +05302910 rss->rss_flags = RSS_ENABLE_NONE;
Vasundhara Volamda1388d2014-01-06 13:02:23 +05302911 return rc;
Sathya Perla482c9e72011-06-29 23:33:17 +00002912 }
2913
Eric Dumazet1dcf7b12014-11-16 06:23:10 -08002914 memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
Venkata Duvvurue2557872014-04-21 15:38:00 +05302915
Sathya Perla482c9e72011-06-29 23:33:17 +00002916 /* First time posting */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002917 for_all_rx_queues(adapter, rxo, i)
Ajit Khapardec30d7262014-09-12 17:39:16 +05302918 be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
Sathya Perla889cd4b2010-05-30 23:33:45 +00002919 return 0;
2920}
2921
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002922static int be_open(struct net_device *netdev)
2923{
2924 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002925 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002926 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002927 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002928 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002929 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002930
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002931 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002932 if (status)
2933 goto err;
2934
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002935 status = be_irq_register(adapter);
2936 if (status)
2937 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002938
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002939 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002940 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002941
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002942 for_all_tx_queues(adapter, txo, i)
2943 be_cq_notify(adapter, txo->cq.id, true, 0);
2944
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002945 be_async_mcc_enable(adapter);
2946
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002947 for_all_evt_queues(adapter, eqo, i) {
2948 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302949 be_enable_busy_poll(eqo);
Suresh Reddy4cad9f32014-07-11 14:03:01 +05302950 be_eq_notify(adapter, eqo->q.id, true, true, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002951 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002952 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002953
Sathya Perla323ff712012-09-28 04:39:43 +00002954 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002955 if (!status)
2956 be_link_status_update(adapter, link_status);
2957
Sathya Perlafba87552013-05-08 02:05:50 +00002958 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002959 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05302960
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302961#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05302962 if (skyhawk_chip(adapter))
2963 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302964#endif
2965
Sathya Perla889cd4b2010-05-30 23:33:45 +00002966 return 0;
2967err:
2968 be_close(adapter->netdev);
2969 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002970}
2971
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002972static int be_setup_wol(struct be_adapter *adapter, bool enable)
2973{
2974 struct be_dma_mem cmd;
2975 int status = 0;
2976 u8 mac[ETH_ALEN];
2977
2978 memset(mac, 0, ETH_ALEN);
2979
2980 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa82013-08-26 22:45:23 -07002981 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2982 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302983 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302984 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002985
2986 if (enable) {
2987 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302988 PCICFG_PM_CONTROL_OFFSET,
2989 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002990 if (status) {
2991 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002992 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002993 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2994 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002995 return status;
2996 }
2997 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302998 adapter->netdev->dev_addr,
2999 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003000 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3001 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3002 } else {
3003 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3004 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3005 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3006 }
3007
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003008 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003009 return status;
3010}
3011
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003012/*
3013 * Generate a seed MAC address from the PF MAC Address using jhash.
3014 * MAC Address for VFs are assigned incrementally starting from the seed.
3015 * These addresses are programmed in the ASIC by the PF and the VF driver
3016 * queries for the MAC address during its probe.
3017 */
Sathya Perla4c876612013-02-03 20:30:11 +00003018static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003019{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003020 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003021 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003022 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003023 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003024
3025 be_vf_eth_addr_generate(adapter, mac);
3026
Sathya Perla11ac75e2011-12-13 00:58:50 +00003027 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303028 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003029 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003030 vf_cfg->if_handle,
3031 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303032 else
3033 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3034 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003035
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003036 if (status)
3037 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303038 "Mac address assignment failed for VF %d\n",
3039 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003040 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003041 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003042
3043 mac[5] += 1;
3044 }
3045 return status;
3046}
3047
Sathya Perla4c876612013-02-03 20:30:11 +00003048static int be_vfs_mac_query(struct be_adapter *adapter)
3049{
3050 int status, vf;
3051 u8 mac[ETH_ALEN];
3052 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003053
3054 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303055 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3056 mac, vf_cfg->if_handle,
3057 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003058 if (status)
3059 return status;
3060 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3061 }
3062 return 0;
3063}
3064
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003065static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003066{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003067 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003068 u32 vf;
3069
Sathya Perla257a3fe2013-06-14 15:54:51 +05303070 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003071 dev_warn(&adapter->pdev->dev,
3072 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003073 goto done;
3074 }
3075
Sathya Perlab4c1df92013-05-08 02:05:47 +00003076 pci_disable_sriov(adapter->pdev);
3077
Sathya Perla11ac75e2011-12-13 00:58:50 +00003078 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303079 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003080 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3081 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303082 else
3083 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3084 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003085
Sathya Perla11ac75e2011-12-13 00:58:50 +00003086 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3087 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003088done:
3089 kfree(adapter->vf_cfg);
3090 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303091 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003092}
3093
Sathya Perla77071332013-08-27 16:57:34 +05303094static void be_clear_queues(struct be_adapter *adapter)
3095{
3096 be_mcc_queues_destroy(adapter);
3097 be_rx_cqs_destroy(adapter);
3098 be_tx_queues_destroy(adapter);
3099 be_evt_queues_destroy(adapter);
3100}
3101
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303102static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003103{
Sathya Perla191eb752012-02-23 18:50:13 +00003104 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3105 cancel_delayed_work_sync(&adapter->work);
3106 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3107 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303108}
3109
Somnath Koturb05004a2013-12-05 12:08:16 +05303110static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303111{
3112 int i;
3113
Somnath Koturb05004a2013-12-05 12:08:16 +05303114 if (adapter->pmac_id) {
3115 for (i = 0; i < (adapter->uc_macs + 1); i++)
3116 be_cmd_pmac_del(adapter, adapter->if_handle,
3117 adapter->pmac_id[i], 0);
3118 adapter->uc_macs = 0;
3119
3120 kfree(adapter->pmac_id);
3121 adapter->pmac_id = NULL;
3122 }
3123}
3124
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303125#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303126static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3127{
3128 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3129 be_cmd_manage_iface(adapter, adapter->if_handle,
3130 OP_CONVERT_TUNNEL_TO_NORMAL);
3131
3132 if (adapter->vxlan_port)
3133 be_cmd_set_vxlan_port(adapter, 0);
3134
3135 adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3136 adapter->vxlan_port = 0;
3137}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303138#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303139
Somnath Koturb05004a2013-12-05 12:08:16 +05303140static int be_clear(struct be_adapter *adapter)
3141{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303142 be_cancel_worker(adapter);
Sathya Perla191eb752012-02-23 18:50:13 +00003143
Sathya Perla11ac75e2011-12-13 00:58:50 +00003144 if (sriov_enabled(adapter))
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003145 be_vf_clear(adapter);
3146
Vasundhara Volambec84e62014-06-30 13:01:32 +05303147 /* Re-configure FW to distribute resources evenly across max-supported
3148 * number of VFs, only when VFs are not already enabled.
3149 */
3150 if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3151 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3152 pci_sriov_get_totalvfs(adapter->pdev));
3153
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303154#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05303155 be_disable_vxlan_offloads(adapter);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05303156#endif
Sathya Perla2d17f402013-07-23 15:25:04 +05303157 /* delete the primary mac along with the uc-mac list */
Somnath Koturb05004a2013-12-05 12:08:16 +05303158 be_mac_clear(adapter);
Ajit Khapardefbc13f02012-03-18 06:23:21 +00003159
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003160 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003161
Sathya Perla77071332013-08-27 16:57:34 +05303162 be_clear_queues(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003163
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003164 be_msix_disable(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303165 adapter->flags &= ~BE_FLAGS_SETUP_DONE;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003166 return 0;
3167}
3168
Sathya Perla4c876612013-02-03 20:30:11 +00003169static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003170{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303171 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003172 struct be_vf_cfg *vf_cfg;
3173 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003174 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003175
Sathya Perla4c876612013-02-03 20:30:11 +00003176 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3177 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003178
Sathya Perla4c876612013-02-03 20:30:11 +00003179 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303180 if (!BE3_chip(adapter)) {
3181 status = be_cmd_get_profile_config(adapter, &res,
3182 vf + 1);
3183 if (!status)
3184 cap_flags = res.if_cap_flags;
3185 }
Sathya Perla4c876612013-02-03 20:30:11 +00003186
3187 /* If a FW profile exists, then cap_flags are updated */
3188 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
Sathya Perla748b5392014-05-09 13:29:13 +05303189 BE_IF_FLAGS_BROADCAST |
3190 BE_IF_FLAGS_MULTICAST);
3191 status =
3192 be_cmd_if_create(adapter, cap_flags, en_flags,
3193 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003194 if (status)
3195 goto err;
3196 }
3197err:
3198 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003199}
3200
Sathya Perla39f1d942012-05-08 19:41:24 +00003201static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003202{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003203 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003204 int vf;
3205
Sathya Perla39f1d942012-05-08 19:41:24 +00003206 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3207 GFP_KERNEL);
3208 if (!adapter->vf_cfg)
3209 return -ENOMEM;
3210
Sathya Perla11ac75e2011-12-13 00:58:50 +00003211 for_all_vfs(adapter, vf_cfg, vf) {
3212 vf_cfg->if_handle = -1;
3213 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003214 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003215 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003216}
3217
/* Bring up SR-IOV VFs. When VFs were already enabled before this probe
 * (old_vfs != 0), the existing FW interfaces and MACs are re-queried and
 * re-used, and pci_enable_sriov()/QoS/link-state programming is skipped.
 * Otherwise fresh interfaces and MAC addresses are created and SR-IOV is
 * enabled in the PCI core. On any failure all VF state is torn down via
 * be_vf_clear().
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist: fetch their if-handles and MACs */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to program MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3292
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303293/* Converting function_mode bits on BE3 to SH mc_type enums */
3294
3295static u8 be_convert_mc_type(u32 function_mode)
3296{
Suresh Reddy66064db2014-06-23 16:41:29 +05303297 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303298 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303299 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303300 return FLEX10;
3301 else if (function_mode & VNIC_MODE)
3302 return vNIC2;
3303 else if (function_mode & UMC_ENABLED)
3304 return UMC;
3305 else
3306 return MC_NONE;
3307}
3308
/* On BE2/BE3 FW does not suggest the supported limits; derive the
 * resource limits (MACs, vlans, queue counts, if-capabilities) here from
 * chip type, multi-channel mode and SR-IOV state.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* The PF gets a bigger unicast-MAC pool than a VF */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable, non-SRIOV PF; otherwise
	 * max_rss_qs stays 0 and only the default RX queue exists.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	/* +1 for the default (non-RSS) RX queue */
	res->max_rx_qs = res->max_rss_qs + 1;

	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
				   BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3376
Sathya Perla30128032011-11-10 19:17:57 +00003377static void be_setup_init(struct be_adapter *adapter)
3378{
3379 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003380 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003381 adapter->if_handle = -1;
3382 adapter->be3_native = false;
3383 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003384 if (be_physfn(adapter))
3385 adapter->cmd_privileges = MAX_PRIVILEGES;
3386 else
3387 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003388}
3389
Vasundhara Volambec84e62014-06-30 13:01:32 +05303390static int be_get_sriov_config(struct be_adapter *adapter)
3391{
3392 struct device *dev = &adapter->pdev->dev;
3393 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303394 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303395
3396 /* Some old versions of BE3 FW don't report max_vfs value */
Sathya Perlad3d18312014-08-01 17:47:30 +05303397 be_cmd_get_profile_config(adapter, &res, 0);
3398
Vasundhara Volambec84e62014-06-30 13:01:32 +05303399 if (BE3_chip(adapter) && !res.max_vfs) {
3400 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3401 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3402 }
3403
Sathya Perlad3d18312014-08-01 17:47:30 +05303404 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303405
3406 if (!be_max_vfs(adapter)) {
3407 if (num_vfs)
Vasundhara Volam50762662014-09-12 17:39:14 +05303408 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
Vasundhara Volambec84e62014-06-30 13:01:32 +05303409 adapter->num_vfs = 0;
3410 return 0;
3411 }
3412
Sathya Perlad3d18312014-08-01 17:47:30 +05303413 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3414
Vasundhara Volambec84e62014-06-30 13:01:32 +05303415 /* validate num_vfs module param */
3416 old_vfs = pci_num_vf(adapter->pdev);
3417 if (old_vfs) {
3418 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3419 if (old_vfs != num_vfs)
3420 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3421 adapter->num_vfs = old_vfs;
3422 } else {
3423 if (num_vfs > be_max_vfs(adapter)) {
3424 dev_info(dev, "Resources unavailable to init %d VFs\n",
3425 num_vfs);
3426 dev_info(dev, "Limiting to %d VFs\n",
3427 be_max_vfs(adapter));
3428 }
3429 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3430 }
3431
3432 return 0;
3433}
3434
/* Populate adapter->res with the per-function resource limits: computed
 * locally on BE2/BE3, queried from FW on newer chips. Logs the limits
 * for diagnostics. Returns 0 or a FW-query error.
 */
static int be_get_resources(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_resources res = {0};
	int status;

	if (BEx_chip(adapter)) {
		BEx_get_resources(adapter, &res);
		adapter->res = res;
	}

	/* For Lancer, SH etc read per-function resource limits from FW.
	 * GET_FUNC_CONFIG returns per function guaranteed limits.
	 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
	 */
	if (!BEx_chip(adapter)) {
		status = be_cmd_get_func_config(adapter, &res);
		if (status)
			return status;

		/* If RoCE may be enabled stash away half the EQs for RoCE */
		if (be_roce_supported(adapter))
			res.max_evt_qs /= 2;
		adapter->res = res;
	}

	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
		 be_max_txqs(adapter), be_max_rxqs(adapter),
		 be_max_rss(adapter), be_max_eqs(adapter),
		 be_max_vfs(adapter));
	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
		 be_max_uc(adapter), be_max_mc(adapter),
		 be_max_vlans(adapter));

	return 0;
}
3471
Sathya Perlad3d18312014-08-01 17:47:30 +05303472static void be_sriov_config(struct be_adapter *adapter)
3473{
3474 struct device *dev = &adapter->pdev->dev;
3475 int status;
3476
3477 status = be_get_sriov_config(adapter);
3478 if (status) {
3479 dev_err(dev, "Failed to query SR-IOV configuration\n");
3480 dev_err(dev, "SR-IOV cannot be enabled\n");
3481 return;
3482 }
3483
3484 /* When the HW is in SRIOV capable configuration, the PF-pool
3485 * resources are equally distributed across the max-number of
3486 * VFs. The user may request only a subset of the max-vfs to be
3487 * enabled. Based on num_vfs, redistribute the resources across
3488 * num_vfs so that each VF will have access to more number of
3489 * resources. This facility is not available in BE3 FW.
3490 * Also, this is done by FW in Lancer chip.
3491 */
3492 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3493 status = be_cmd_set_sriov_config(adapter,
3494 adapter->pool_res,
3495 adapter->num_vfs);
3496 if (status)
3497 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3498 }
3499}
3500
/* Query FW configuration/capabilities, configure SR-IOV where
 * applicable, read resource limits, and size the pmac-id table and
 * cfg_num_qs from those limits. Returns 0, a FW error, or -ENOMEM.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* Active-profile query is informational only; errors ignored */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* BE2 has no SR-IOV re-distribution support */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3534
Sathya Perla95046b92013-07-23 15:25:02 +05303535static int be_mac_setup(struct be_adapter *adapter)
3536{
3537 u8 mac[ETH_ALEN];
3538 int status;
3539
3540 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3541 status = be_cmd_get_perm_mac(adapter, mac);
3542 if (status)
3543 return status;
3544
3545 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3546 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3547 } else {
3548 /* Maybe the HW was reset; dev_addr must be re-programmed */
3549 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3550 }
3551
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003552 /* For BE3-R VFs, the PF programs the initial MAC address */
3553 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3554 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3555 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303556 return 0;
3557}
3558
/* Arm the periodic (1 second) worker and record that fact in
 * adapter->flags so it can later be cancelled exactly once.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3564
Sathya Perla77071332013-08-27 16:57:34 +05303565static int be_setup_queues(struct be_adapter *adapter)
3566{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303567 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303568 int status;
3569
3570 status = be_evt_queues_create(adapter);
3571 if (status)
3572 goto err;
3573
3574 status = be_tx_qs_create(adapter);
3575 if (status)
3576 goto err;
3577
3578 status = be_rx_cqs_create(adapter);
3579 if (status)
3580 goto err;
3581
3582 status = be_mcc_queues_create(adapter);
3583 if (status)
3584 goto err;
3585
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303586 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3587 if (status)
3588 goto err;
3589
3590 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3591 if (status)
3592 goto err;
3593
Sathya Perla77071332013-08-27 16:57:34 +05303594 return 0;
3595err:
3596 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3597 return status;
3598}
3599
/* Destroy and re-create all queues (used when queue counts change),
 * pausing the interface and the periodic worker around the operation.
 * Returns 0, or the first error from MSI-x/queue setup or be_open().
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-x only if it was actually disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3635
/* Main (re-)initialization sequence for the function: query FW config,
 * enable MSI-x, create the FW interface and all queues, program the MAC,
 * apply vlan/rx-mode/flow-control/link settings, optionally bring up
 * VFs, and finally start the periodic worker. On any error everything
 * done so far is undone via be_clear().
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable only the flags the interface is actually capable of */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Old BE2 firmware has known interrupt problems; warn the user */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Only push flow-control settings that differ from the FW's */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3718
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: ring every event queue's doorbell and kick its NAPI
 * context so pending completions are processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eq;
	int idx;

	for_all_evt_queues(adapter, eq, idx) {
		be_eq_notify(eq->adapter, eq->q.id, false, true, 0);
		napi_schedule(&eq->napi);
	}
}
#endif
3732
/* 32-byte marker ("*** SE FLASH DIRECTORY *** ") that locates the flash
 * section directory in a UFI image; compared as raw bytes via memcmp,
 * so no NUL terminators are needed (the second string exactly fills its
 * 16-byte slot).
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003734
Sathya Perla306f1342011-08-02 19:57:45 +00003735static bool phy_flashing_required(struct be_adapter *adapter)
3736{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003737 return (adapter->phy.phy_type == TN_8022 &&
3738 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003739}
3740
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003741static bool is_comp_in_ufi(struct be_adapter *adapter,
3742 struct flash_section_info *fsec, int type)
3743{
3744 int i = 0, img_type = 0;
3745 struct flash_section_info_g2 *fsec_g2 = NULL;
3746
Sathya Perlaca34fe32012-11-06 17:48:56 +00003747 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003748 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3749
3750 for (i = 0; i < MAX_FLASH_COMP; i++) {
3751 if (fsec_g2)
3752 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3753 else
3754 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3755
3756 if (img_type == type)
3757 return true;
3758 }
3759 return false;
3760
3761}
3762
Jingoo Han4188e7d2013-08-05 18:02:02 +09003763static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303764 int header_size,
3765 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003766{
3767 struct flash_section_info *fsec = NULL;
3768 const u8 *p = fw->data;
3769
3770 p += header_size;
3771 while (p < (fw->data + fw->size)) {
3772 fsec = (struct flash_section_info *)p;
3773 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3774 return fsec;
3775 p += 32;
3776 }
3777 return NULL;
3778}
3779
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303780static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3781 u32 img_offset, u32 img_size, int hdr_size,
3782 u16 img_optype, bool *crc_match)
3783{
3784 u32 crc_offset;
3785 int status;
3786 u8 crc[4];
3787
3788 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3789 if (status)
3790 return status;
3791
3792 crc_offset = hdr_size + img_offset + img_size - 4;
3793
3794 /* Skip flashing, if crc of flashed region matches */
3795 if (!memcmp(crc, p + crc_offset, 4))
3796 *crc_match = true;
3797 else
3798 *crc_match = false;
3799
3800 return status;
3801}
3802
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003803static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303804 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003805{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003806 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303807 u32 total_bytes, flash_op, num_bytes;
3808 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003809
3810 total_bytes = img_size;
3811 while (total_bytes) {
3812 num_bytes = min_t(u32, 32*1024, total_bytes);
3813
3814 total_bytes -= num_bytes;
3815
3816 if (!total_bytes) {
3817 if (optype == OPTYPE_PHY_FW)
3818 flash_op = FLASHROM_OPER_PHY_FLASH;
3819 else
3820 flash_op = FLASHROM_OPER_FLASH;
3821 } else {
3822 if (optype == OPTYPE_PHY_FW)
3823 flash_op = FLASHROM_OPER_PHY_SAVE;
3824 else
3825 flash_op = FLASHROM_OPER_SAVE;
3826 }
3827
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003828 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003829 img += num_bytes;
3830 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303831 flash_op, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05303832 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303833 optype == OPTYPE_PHY_FW)
3834 break;
3835 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003836 return status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003837 }
3838 return 0;
3839}
3840
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003841/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003842static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303843 const struct firmware *fw,
3844 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003845{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003846 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303847 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003848 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303849 int status, i, filehdr_size, num_comp;
3850 const struct flash_comp *pflashcomp;
3851 bool crc_match;
3852 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003853
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003854 struct flash_comp gen3_flash_types[] = {
3855 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3856 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3857 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3858 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3859 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3860 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3861 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3862 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3863 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3864 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3865 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3866 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3867 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3868 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3869 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3870 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3871 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3872 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3873 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3874 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003875 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003876
3877 struct flash_comp gen2_flash_types[] = {
3878 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3879 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3880 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3881 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3882 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3883 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3884 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3885 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3886 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3887 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3888 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3889 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3890 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3891 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3892 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3893 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003894 };
3895
Sathya Perlaca34fe32012-11-06 17:48:56 +00003896 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003897 pflashcomp = gen3_flash_types;
3898 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003899 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003900 } else {
3901 pflashcomp = gen2_flash_types;
3902 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003903 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003904 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003905
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003906 /* Get flash section info*/
3907 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3908 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303909 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003910 return -1;
3911 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003912 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003913 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003914 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003915
3916 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3917 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3918 continue;
3919
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003920 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3921 !phy_flashing_required(adapter))
3922 continue;
3923
3924 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303925 status = be_check_flash_crc(adapter, fw->data,
3926 pflashcomp[i].offset,
3927 pflashcomp[i].size,
3928 filehdr_size +
3929 img_hdrs_size,
3930 OPTYPE_REDBOOT, &crc_match);
3931 if (status) {
3932 dev_err(dev,
3933 "Could not get CRC for 0x%x region\n",
3934 pflashcomp[i].optype);
3935 continue;
3936 }
3937
3938 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003939 continue;
3940 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003941
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303942 p = fw->data + filehdr_size + pflashcomp[i].offset +
3943 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003944 if (p + pflashcomp[i].size > fw->data + fw->size)
3945 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003946
3947 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303948 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003949 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303950 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003951 pflashcomp[i].img_type);
3952 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003953 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003954 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003955 return 0;
3956}
3957
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303958static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3959{
3960 u32 img_type = le32_to_cpu(fsec_entry.type);
3961 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3962
3963 if (img_optype != 0xFFFF)
3964 return img_optype;
3965
3966 switch (img_type) {
3967 case IMAGE_FIRMWARE_iSCSI:
3968 img_optype = OPTYPE_ISCSI_ACTIVE;
3969 break;
3970 case IMAGE_BOOT_CODE:
3971 img_optype = OPTYPE_REDBOOT;
3972 break;
3973 case IMAGE_OPTION_ROM_ISCSI:
3974 img_optype = OPTYPE_BIOS;
3975 break;
3976 case IMAGE_OPTION_ROM_PXE:
3977 img_optype = OPTYPE_PXE_BIOS;
3978 break;
3979 case IMAGE_OPTION_ROM_FCoE:
3980 img_optype = OPTYPE_FCOE_BIOS;
3981 break;
3982 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3983 img_optype = OPTYPE_ISCSI_BACKUP;
3984 break;
3985 case IMAGE_NCSI:
3986 img_optype = OPTYPE_NCSI_FW;
3987 break;
3988 case IMAGE_FLASHISM_JUMPVECTOR:
3989 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3990 break;
3991 case IMAGE_FIRMWARE_PHY:
3992 img_optype = OPTYPE_SH_PHY_FW;
3993 break;
3994 case IMAGE_REDBOOT_DIR:
3995 img_optype = OPTYPE_REDBOOT_DIR;
3996 break;
3997 case IMAGE_REDBOOT_CONFIG:
3998 img_optype = OPTYPE_REDBOOT_CONFIG;
3999 break;
4000 case IMAGE_UFI_DIR:
4001 img_optype = OPTYPE_UFI_DIR;
4002 break;
4003 default:
4004 break;
4005 }
4006
4007 return img_optype;
4008}
4009
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004010static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304011 const struct firmware *fw,
4012 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004013{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004014 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304015 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004016 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304017 u32 img_offset, img_size, img_type;
4018 int status, i, filehdr_size;
4019 bool crc_match, old_fw_img;
4020 u16 img_optype;
4021 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004022
4023 filehdr_size = sizeof(struct flash_file_hdr_g3);
4024 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4025 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304026 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304027 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004028 }
4029
4030 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4031 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4032 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304033 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4034 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4035 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004036
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304037 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004038 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304039 /* Don't bother verifying CRC if an old FW image is being
4040 * flashed
4041 */
4042 if (old_fw_img)
4043 goto flash;
4044
4045 status = be_check_flash_crc(adapter, fw->data, img_offset,
4046 img_size, filehdr_size +
4047 img_hdrs_size, img_optype,
4048 &crc_match);
4049 /* The current FW image on the card does not recognize the new
4050 * FLASH op_type. The FW download is partially complete.
4051 * Reboot the server now to enable FW image to recognize the
4052 * new FLASH op_type. To complete the remaining process,
4053 * download the same FW again after the reboot.
4054 */
Kalesh AP4c600052014-05-30 19:06:26 +05304055 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4056 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304057 dev_err(dev, "Flash incomplete. Reset the server\n");
4058 dev_err(dev, "Download FW image again after reset\n");
4059 return -EAGAIN;
4060 } else if (status) {
4061 dev_err(dev, "Could not get CRC for 0x%x region\n",
4062 img_optype);
4063 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004064 }
4065
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304066 if (crc_match)
4067 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004068
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304069flash:
4070 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004071 if (p + img_size > fw->data + fw->size)
4072 return -1;
4073
4074 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304075 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4076 * UFI_DIR region
4077 */
Kalesh AP4c600052014-05-30 19:06:26 +05304078 if (old_fw_img &&
4079 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4080 (img_optype == OPTYPE_UFI_DIR &&
4081 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304082 continue;
4083 } else if (status) {
4084 dev_err(dev, "Flashing section type 0x%x failed\n",
4085 img_type);
4086 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004087 }
4088 }
4089 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004090}
4091
/* Download a firmware image to a Lancer chip via WRITE_OBJECT commands.
 * @adapter: adapter to flash
 * @fw: firmware blob (must be a multiple of 4 bytes in size)
 *
 * The image is streamed to the "/prg" flash object in 32KB chunks through a
 * single reusable DMA buffer, then committed with a zero-length write. After
 * a successful flash, the FW is reset in-place when it supports it;
 * otherwise the user is told to reboot. Returns 0 or a negative errno.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* The WRITE_OBJECT command operates on dword-aligned payloads */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the command header plus one image chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	/* Image data is placed right after the command header */
	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* FW may accept less than chunk_size; advance by what it
		 * actually consumed
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	/* Activate the new FW: reset in-place when supported, otherwise
	 * ask the user to reboot the server
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4176
Sathya Perlaca34fe32012-11-06 17:48:56 +00004177#define UFI_TYPE2 2
4178#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004179#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004180#define UFI_TYPE4 4
4181static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004182 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004183{
Kalesh APddf11692014-07-17 16:20:28 +05304184 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004185 goto be_get_ufi_exit;
4186
Sathya Perlaca34fe32012-11-06 17:48:56 +00004187 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4188 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004189 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4190 if (fhdr->asic_type_rev == 0x10)
4191 return UFI_TYPE3R;
4192 else
4193 return UFI_TYPE3;
4194 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004195 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004196
4197be_get_ufi_exit:
4198 dev_err(&adapter->pdev->dev,
4199 "UFI and Interface are not compatible for flashing\n");
4200 return -1;
4201}
4202
/* Flash a UFI firmware image on BE2/BE3/Skyhawk adapters.
 * @adapter: adapter to flash
 * @fw: firmware blob containing the UFI image
 *
 * Identifies the UFI flavor from the file header, then dispatches each
 * image with imageid == 1 to the generation-specific flashing routine.
 * BE3 (non-R) UFIs are refused on BE3-R hardware. Returns 0 or a negative
 * errno.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* One DMA buffer is shared by all flashrom commands issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	ufi_type = be_get_ufi_type(adapter, fhdr3);

	/* Walk the image headers; only imageid == 1 entries are flashed */
	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
				/* last case - fall out of the switch */
			}
		}
	}

	/* Gen2 (BE2) UFIs carry no image headers: flash directly.
	 * ufi_type == -1 means the image did not match the adapter.
	 */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4271
4272int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4273{
4274 const struct firmware *fw;
4275 int status;
4276
4277 if (!netif_running(adapter->netdev)) {
4278 dev_err(&adapter->pdev->dev,
4279 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304280 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004281 }
4282
4283 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4284 if (status)
4285 goto fw_exit;
4286
4287 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4288
4289 if (lancer_chip(adapter))
4290 status = lancer_fw_download(adapter, fw);
4291 else
4292 status = be_fw_download(adapter, fw);
4293
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004294 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304295 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004296
Ajit Khaparde84517482009-09-04 03:12:16 +00004297fw_exit:
4298 release_firmware(fw);
4299 return status;
4300}
4301
Sathya Perla748b5392014-05-09 13:29:13 +05304302static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004303{
4304 struct be_adapter *adapter = netdev_priv(dev);
4305 struct nlattr *attr, *br_spec;
4306 int rem;
4307 int status = 0;
4308 u16 mode = 0;
4309
4310 if (!sriov_enabled(adapter))
4311 return -EOPNOTSUPP;
4312
4313 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4314
4315 nla_for_each_nested(attr, br_spec, rem) {
4316 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4317 continue;
4318
4319 mode = nla_get_u16(attr);
4320 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4321 return -EINVAL;
4322
4323 status = be_cmd_set_hsw_config(adapter, 0, 0,
4324 adapter->if_handle,
4325 mode == BRIDGE_MODE_VEPA ?
4326 PORT_FWD_TYPE_VEPA :
4327 PORT_FWD_TYPE_VEB);
4328 if (status)
4329 goto err;
4330
4331 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4332 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4333
4334 return status;
4335 }
4336err:
4337 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4338 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4339
4340 return status;
4341}
4342
4343static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304344 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004345{
4346 struct be_adapter *adapter = netdev_priv(dev);
4347 int status = 0;
4348 u8 hsw_mode;
4349
4350 if (!sriov_enabled(adapter))
4351 return 0;
4352
4353 /* BE and Lancer chips support VEB mode only */
4354 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4355 hsw_mode = PORT_FWD_TYPE_VEB;
4356 } else {
4357 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4358 adapter->if_handle, &hsw_mode);
4359 if (status)
4360 return 0;
4361 }
4362
4363 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4364 hsw_mode == PORT_FWD_TYPE_VEPA ?
4365 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4366}
4367
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304368#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN add-port notifier: enable VxLAN offloads for the given UDP port.
 * Only Skyhawk supports VxLAN offloads, and only a single UDP port can be
 * offloaded at a time. Converts the interface to tunnel mode and programs
 * the port in FW; on any failure, offloads are rolled back entirely.
 */
static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
			      __be16 port)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int status;

	/* Lancer and BE2/BE3 have no VxLAN offload support */
	if (lancer_chip(adapter) || BEx_chip(adapter))
		return;

	/* HW can offload only one UDP port; a second request is refused */
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
		dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
			 be16_to_cpu(port));
		dev_info(dev,
			 "Only one UDP port supported for VxLAN offloads\n");
		return;
	}

	status = be_cmd_manage_iface(adapter, adapter->if_handle,
				     OP_CONVERT_NORMAL_TO_TUNNEL);
	if (status) {
		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
		goto err;
	}

	status = be_cmd_set_vxlan_port(adapter, port);
	if (status) {
		dev_warn(dev, "Failed to add VxLAN port\n");
		goto err;
	}
	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = port;

	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
		 be16_to_cpu(port));
	return;
err:
	/* Undo the tunnel conversion / partial setup */
	be_disable_vxlan_offloads(adapter);
}
4408
4409static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4410 __be16 port)
4411{
4412 struct be_adapter *adapter = netdev_priv(netdev);
4413
4414 if (lancer_chip(adapter) || BEx_chip(adapter))
4415 return;
4416
4417 if (adapter->vxlan_port != port)
4418 return;
4419
4420 be_disable_vxlan_offloads(adapter);
4421
4422 dev_info(&adapter->pdev->dev,
4423 "Disabled VxLAN offloads for UDP port %d\n",
4424 be16_to_cpu(port));
4425}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304426#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304427
/* Net-device callbacks exported to the networking core.
 * SR-IOV VF management, bridge (e-switch) configuration and - depending on
 * kernel config - netpoll, busy-poll and VxLAN offload hooks are included.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
#endif
};
4457
/* Initialize netdev feature flags and attach the driver's ops/ethtool ops.
 * Called once at probe time, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Only Skyhawk supports VxLAN; advertise checksum/TSO on
	 * encapsulated traffic as well
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX flow hashing is only meaningful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* VLAN RX accel and filtering are always on (not user-toggleable,
	 * hence in features but not hw_features)
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* HW supports unicast MAC filtering */
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Cap GSO so that the frame plus Ethernet header fits in 64KB */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4490
4491static void be_unmap_pci_bars(struct be_adapter *adapter)
4492{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004493 if (adapter->csr)
4494 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004495 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004496 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004497}
4498
/* PCI BAR number holding the doorbell registers:
 * BAR 0 on Lancer and on VFs, BAR 4 on other PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4506
4507static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004508{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004509 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004510 adapter->roce_db.size = 4096;
4511 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4512 db_bar(adapter));
4513 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4514 db_bar(adapter));
4515 }
Parav Pandit045508a2012-03-26 14:27:13 +00004516 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004517}
4518
4519static int be_map_pci_bars(struct be_adapter *adapter)
4520{
4521 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004522
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004523 if (BEx_chip(adapter) && be_physfn(adapter)) {
4524 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304525 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004526 return -ENOMEM;
4527 }
4528
Sathya Perlace66f782012-11-06 17:48:58 +00004529 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304530 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004531 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004532 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004533
4534 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004535 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004536
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004537pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304538 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004539 be_unmap_pci_bars(adapter);
4540 return -ENOMEM;
4541}
4542
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004543static void be_ctrl_cleanup(struct be_adapter *adapter)
4544{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004545 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004546
4547 be_unmap_pci_bars(adapter);
4548
4549 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004550 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4551 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004552
Sathya Perla5b8821b2011-08-02 19:57:44 +00004553 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004554 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004555 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4556 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004557}
4558
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004559static int be_ctrl_init(struct be_adapter *adapter)
4560{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004561 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4562 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004563 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004564 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004565 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004566
Sathya Perlace66f782012-11-06 17:48:58 +00004567 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4568 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4569 SLI_INTF_FAMILY_SHIFT;
4570 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4571
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004572 status = be_map_pci_bars(adapter);
4573 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004574 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004575
4576 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004577 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4578 mbox_mem_alloc->size,
4579 &mbox_mem_alloc->dma,
4580 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004581 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004582 status = -ENOMEM;
4583 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004584 }
4585 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4586 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4587 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4588 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004589
Sathya Perla5b8821b2011-08-02 19:57:44 +00004590 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa82013-08-26 22:45:23 -07004591 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4592 rx_filter->size, &rx_filter->dma,
4593 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304594 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004595 status = -ENOMEM;
4596 goto free_mbox;
4597 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004598
Ivan Vecera29849612010-12-14 05:43:19 +00004599 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004600 spin_lock_init(&adapter->mcc_lock);
4601 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004602
Suresh Reddy5eeff632014-01-06 13:02:24 +05304603 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004604 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004605 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004606
4607free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004608 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4609 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004610
4611unmap_pci_bars:
4612 be_unmap_pci_bars(adapter);
4613
4614done:
4615 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004616}
4617
4618static void be_stats_cleanup(struct be_adapter *adapter)
4619{
Sathya Perla3abcded2010-10-03 22:12:27 -07004620 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004621
4622 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004623 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4624 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004625}
4626
4627static int be_stats_init(struct be_adapter *adapter)
4628{
Sathya Perla3abcded2010-10-03 22:12:27 -07004629 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004630
Sathya Perlaca34fe32012-11-06 17:48:56 +00004631 if (lancer_chip(adapter))
4632 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4633 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004634 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004635 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004636 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004637 else
4638 /* ALL non-BE ASICs */
4639 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004640
Joe Perchesede23fa82013-08-26 22:45:23 -07004641 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4642 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304643 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304644 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004645 return 0;
4646}
4647
/* PCI remove callback: tear down the adapter in (roughly) the reverse
 * order of be_probe(). The ordering is load-bearing: RoCE is detached and
 * interrupts are masked before the recovery worker is cancelled and the
 * netdev is unregistered; the f/w is told we are done only after all
 * queues/rings are destroyed by be_clear().
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed before pci_set_drvdata() */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4678
Sathya Perla39f1d942012-05-08 19:41:24 +00004679static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004680{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304681 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004682
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004683 status = be_cmd_get_cntl_attributes(adapter);
4684 if (status)
4685 return status;
4686
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004687 /* Must be a power of 2 or else MODULO will BUG_ON */
4688 adapter->be_get_temp_freq = 64;
4689
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304690 if (BEx_chip(adapter)) {
4691 level = be_cmd_get_fw_log_level(adapter);
4692 adapter->msg_enable =
4693 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4694 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004695
Sathya Perla92bf14a2013-08-27 16:57:32 +05304696 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004697 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004698}
4699
/* Attempt to recover a Lancer chip after an error was detected: wait for
 * the f/w to reach the ready state, then tear the interface down and
 * bring it back up.
 * Returns 0 on success. -EAGAIN indicates the f/w is still provisioning
 * resources and the caller may retry; any other error is treated as a
 * failed recovery.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* clear error state only after teardown, before re-setup */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4736
/* Delayed-work handler (nominally every 1s) that polls for h/w errors.
 * On Lancer chips it detaches the netdev and runs the recovery sequence;
 * the work re-arms itself unless a Lancer recovery failed with a
 * non-retryable error.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* detach under rtnl so the stack stops using the device
		 * while recovery runs
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4762
/* Periodic (1s) housekeeping work: reap MCC completions while the
 * interface is down, kick off stats collection, poll die temperature on
 * the PF, replenish starved RX queues and update EQ delay settings.
 * Always re-arms itself.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		/* be_process_mcc() expects BH context */
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* fire a new async stats request only if the previous completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* temperature poll: PF only, every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4805
Sathya Perla257a3fe2013-06-14 15:54:51 +05304806/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004807static bool be_reset_required(struct be_adapter *adapter)
4808{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304809 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004810}
4811
Sathya Perlad3791422012-09-28 04:39:44 +00004812static char *mc_name(struct be_adapter *adapter)
4813{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304814 char *str = ""; /* default */
4815
4816 switch (adapter->mc_type) {
4817 case UMC:
4818 str = "UMC";
4819 break;
4820 case FLEX10:
4821 str = "FLEX10";
4822 break;
4823 case vNIC1:
4824 str = "vNIC-1";
4825 break;
4826 case nPAR:
4827 str = "nPAR";
4828 break;
4829 case UFP:
4830 str = "UFP";
4831 break;
4832 case vNIC2:
4833 str = "vNIC-2";
4834 break;
4835 default:
4836 str = "";
4837 }
4838
4839 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004840}
4841
/* "PF" for a physical function, "VF" for a virtual function. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4846
/* PCI probe callback: bring the adapter from power-on to a registered
 * netdev. The long error-unwind chain at the bottom releases resources
 * in the exact reverse order they were acquired — each goto label
 * corresponds to the last successful step.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* prefer a 64-bit DMA mask; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* flow control defaults to enabled in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4968}
4969
4970static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4971{
4972 struct be_adapter *adapter = pci_get_drvdata(pdev);
4973 struct net_device *netdev = adapter->netdev;
4974
Suresh Reddy76a9e082014-01-15 13:23:40 +05304975 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00004976 be_setup_wol(adapter, true);
4977
Ajit Khaparded4360d62013-11-22 12:51:09 -06004978 be_intr_set(adapter, false);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004979 cancel_delayed_work_sync(&adapter->func_recovery_work);
4980
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004981 netif_device_detach(netdev);
4982 if (netif_running(netdev)) {
4983 rtnl_lock();
4984 be_close(netdev);
4985 rtnl_unlock();
4986 }
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00004987 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004988
4989 pci_save_state(pdev);
4990 pci_disable_device(pdev);
4991 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4992 return 0;
4993}
4994
4995static int be_resume(struct pci_dev *pdev)
4996{
4997 int status = 0;
4998 struct be_adapter *adapter = pci_get_drvdata(pdev);
4999 struct net_device *netdev = adapter->netdev;
5000
5001 netif_device_detach(netdev);
5002
5003 status = pci_enable_device(pdev);
5004 if (status)
5005 return status;
5006
Yijing Wang1ca01512013-06-27 20:53:42 +08005007 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005008 pci_restore_state(pdev);
5009
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05305010 status = be_fw_wait_ready(adapter);
5011 if (status)
5012 return status;
5013
Ajit Khaparded4360d62013-11-22 12:51:09 -06005014 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005015 /* tell fw we're ready to fire cmds */
5016 status = be_cmd_fw_init(adapter);
5017 if (status)
5018 return status;
5019
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00005020 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005021 if (netif_running(netdev)) {
5022 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005023 be_open(netdev);
5024 rtnl_unlock();
5025 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005026
5027 schedule_delayed_work(&adapter->func_recovery_work,
5028 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005029 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005030
Suresh Reddy76a9e082014-01-15 13:23:40 +05305031 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005032 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005033
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005034 return 0;
5035}
5036
/* PCI shutdown callback (reboot/halt path).
 * An FLR will stop BE from DMAing any data: the function is reset after
 * workers are cancelled and the netdev is detached, then the device is
 * disabled. No memory is freed here — the system is going down.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* drvdata may be NULL if probe failed */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5057
/* EEH error_detected callback: quiesce the interface on first detection
 * (the eeh_error flag guards against repeated teardown), then ask the
 * EEH core to either disconnect (permanent failure) or reset the slot.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5096
/* EEH slot_reset callback: re-enable the device after a slot reset,
 * restore PCI state and wait for the f/w to come back. Returns
 * PCI_ERS_RESULT_RECOVERED on success so the EEH core proceeds to
 * be_eeh_resume(), or DISCONNECT if the device cannot be revived.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5123
/* EEH resume callback: final stage of PCI error recovery. Resets the
 * function, re-initializes the f/w and rings, reopens the interface if
 * it was running and re-arms the recovery worker. On any failure only a
 * message is logged — there is no further unwind at this stage.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5166
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5172
/* PCI driver descriptor: entry points for probe/remove, legacy power
 * management, shutdown and EEH error recovery.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5183
5184static int __init be_init_module(void)
5185{
Joe Perches8e95a202009-12-03 07:58:21 +00005186 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5187 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005188 printk(KERN_WARNING DRV_NAME
5189 " : Module param rx_frag_size must be 2048/4096/8192."
5190 " Using 2048\n");
5191 rx_frag_size = 2048;
5192 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005193
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005194 return pci_register_driver(&be_driver);
5195}
5196module_init(be_init_module);
5197
/* Module unload entry point: deregister the PCI driver. */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
5202module_exit(be_exit_module);