blob: 597c463e384d0d5d9fb0f46a9b363e9c9165c6cf [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: index == bit position, value == name of the h/w block
 * that raised the unrecoverable error. Several names carry trailing spaces
 * exactly as the firmware team defined them; keep them verbatim so logs
 * stay comparable across driver versions.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR: index == bit position, value == name of the h/w block
 * that raised the unrecoverable error (bits 32..63 of the UE status).
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000271 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000283 }
284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000290 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000291 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700292
Sathya Perla5a712c12013-07-23 15:24:59 +0530293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
dingtianhong61d23e92013-12-30 15:40:43 +0800296 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 status = -EPERM;
298 goto err;
299 }
300
Somnath Koture3a7ae22011-10-27 07:14:05 +0000301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530302 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 return 0;
304err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306 return status;
307}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
Ajit Khaparde61000862013-10-03 16:16:33 -0500440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530484 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500492}
493
Selvin Xavier005d5692011-05-16 07:36:35 +0000494static void populate_lancer_stats(struct be_adapter *adapter)
495{
Selvin Xavier005d5692011-05-16 07:36:35 +0000496 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000527 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000528 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000556void be_parse_stats(struct be_adapter *adapter)
557{
Ajit Khaparde61000862013-10-03 16:16:33 -0500558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559 struct be_rx_obj *rxo;
560 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000561 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000562
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000565 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else if (BE3_chip(adapter))
569 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 else
572 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573
Ajit Khaparde61000862013-10-03 16:16:33 -0500574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000578 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000579 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000580}
581
/* ndo_get_stats64: aggregate per-RX/TX-queue packet/byte counters and the
 * driver-maintained HW error stats into @stats.
 * Per-queue u64 counters are read under the u64_stats seqcount so that
 * 64-bit values are torn-read-safe on 32-bit hosts.
 */
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
						struct rtnl_link_stats64 *stats)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 pkts, bytes;
	unsigned int start;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		const struct be_rx_stats *rx_stats = rx_stats(rxo);

		/* retry if the writer updated the counters mid-read */
		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
			pkts = rx_stats(rxo)->rx_pkts;
			bytes = rx_stats(rxo)->rx_bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
		stats->rx_packets += pkts;
		stats->rx_bytes += bytes;
		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
					rx_stats(rxo)->rx_drops_no_frags;
	}

	for_all_tx_queues(adapter, txo, i) {
		const struct be_tx_stats *tx_stats = tx_stats(txo);

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
			pkts = tx_stats(txo)->tx_pkts;
			bytes = tx_stats(txo)->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
		stats->tx_packets += pkts;
		stats->tx_bytes += bytes;
	}

	/* bad pkts received */
	stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt;

	/* detailed rx errors */
	stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
				drvs->rx_input_fifo_overflow_drop +
				drvs->rx_drops_no_pbuf;
	return stats;
}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla3c8def92011-06-12 20:01:58 +0000665static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530666 u32 wrb_cnt, u32 copied, u32 gso_segs,
667 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668{
Sathya Perla3c8def92011-06-12 20:01:58 +0000669 struct be_tx_stats *stats = tx_stats(txo);
670
Sathya Perlaab1594e2011-07-25 19:10:15 +0000671 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt;
674 stats->tx_bytes += copied;
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000677 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000678 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
681/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000682static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530683 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700685 int cnt = (skb->len > skb->data_len);
686
687 cnt += skb_shinfo(skb)->nr_frags;
688
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* to account for hdr wrb */
690 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000697 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
704 wrb->frag_pa_hi = upper_32_bits(addr);
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000707 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708}
709
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530711 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000712{
713 u8 vlan_prio;
714 u16 vlan_tag;
715
716 vlan_tag = vlan_tx_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
720 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
721 adapter->recommended_prio;
722
723 return vlan_tag;
724}
725
Sathya Perlac9c47142014-03-27 10:46:19 +0530726/* Used only for IP tunnel packets */
727static u16 skb_inner_ip_proto(struct sk_buff *skb)
728{
729 return (inner_ip_hdr(skb)->version == 4) ?
730 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
731}
732
733static u16 skb_ip_proto(struct sk_buff *skb)
734{
735 return (ip_hdr(skb)->version == 4) ?
736 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
737}
738
/* Program the TX header WRB: CRC, LSO/checksum-offload, VLAN and
 * length/num_wrb fields.
 * @skip_hw_vlan: when true, HW VLAN insertion is suppressed by setting
 * event=1 with complete=0 (FW workaround; see callers).
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
			 struct sk_buff *skb, u32 wrb_cnt, u32 len,
			 bool skip_hw_vlan)
{
	u16 vlan_tag, proto;

	memset(hdr, 0, sizeof(*hdr));

	SET_TX_WRB_HDR_BITS(crc, hdr, 1);

	if (skb_is_gso(skb)) {
		SET_TX_WRB_HDR_BITS(lso, hdr, 1);
		SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
		/* lso6 bit not set on Lancer per its LSOv6 handling */
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			/* tunnelled pkt: request inner IP csum too and
			 * use the inner L4 protocol for tcpcs/udpcs
			 */
			SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
			proto = skb_inner_ip_proto(skb);
		} else {
			proto = skb_ip_proto(skb);
		}
		if (proto == IPPROTO_TCP)
			SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
		else if (proto == IPPROTO_UDP)
			SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
	}

	if (vlan_tx_tag_present(skb)) {
		SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
		SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
	}

	/* To skip HW VLAN tagging: evt = 1, compl = 0 */
	SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
	SET_TX_WRB_HDR_BITS(event, hdr, 1);
	SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
	SET_TX_WRB_HDR_BITS(len, hdr, len);
}
779
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000780static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530781 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000782{
783 dma_addr_t dma;
784
785 be_dws_le_to_cpu(wrb, sizeof(*wrb));
786
787 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000788 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000789 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000790 dma_unmap_single(dev, dma, wrb->frag_len,
791 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000792 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000793 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000794 }
795}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700796
/* DMA-map the skb and populate the TX queue with one header WRB followed
 * by data WRBs for the linear part and each frag (plus an optional dummy
 * WRB for even alignment).
 * Returns the number of data bytes mapped, or 0 on DMA-mapping failure,
 * in which case the queue head is rewound and all mappings are undone.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB slot; it is filled last, once the total
	 * copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* first data WRB; unwind point on error */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;	/* linear part uses a single mapping */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	if (dummy_wrb) {
		/* zero-length WRB to keep the WRB count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first data WRB and unmap everything mapped so far;
	 * only the first WRB can hold a single (linear) mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}
865
/* Insert the VLAN tag (and the outer QnQ tag, if configured) into the
 * packet data itself, instead of relying on HW insertion.
 * May replace @skb (share-check / tag insertion can reallocate); returns
 * NULL if any step fails, in which case the skb has been consumed.
 * Sets *skip_hw_vlan when the FW must be told not to add its own tag.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* we will modify the packet data; get a private copy if shared */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		/* untagged traffic gets the port-default vid in QnQ mode */
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag is now inline; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
908
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000909static bool be_ipv6_exthdr_check(struct sk_buff *skb)
910{
911 struct ethhdr *eh = (struct ethhdr *)skb->data;
912 u16 offset = ETH_HLEN;
913
914 if (eh->h_proto == htons(ETH_P_IPV6)) {
915 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
916
917 offset += sizeof(struct ipv6hdr);
918 if (ip6h->nexthdr != NEXTHDR_TCP &&
919 ip6h->nexthdr != NEXTHDR_UDP) {
920 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530921 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000922
923 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
924 if (ehdr->hdrlen == 0xff)
925 return true;
926 }
927 }
928 return false;
929}
930
931static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
932{
933 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
934}
935
Sathya Perla748b5392014-05-09 13:29:13 +0530936static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000937{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000938 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000939}
940
/* Apply BEx/Lancer TX HW-erratum workarounds to @skb before it is
 * handed to the WRB builder. May trim, retag, or replace the skb.
 * Returns the (possibly new) skb, or NULL if it was dropped/consumed;
 * sets *skip_hw_vlan when HW VLAN insertion must be suppressed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrecly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the padding off short IPv4 pkts.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;	/* skb already consumed */
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1008
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301009static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1010 struct sk_buff *skb,
1011 bool *skip_hw_vlan)
1012{
1013 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1014 * less may cause a transmit stall on that port. So the work-around is
1015 * to pad short packets (<= 32 bytes) to a 36-byte length.
1016 */
1017 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1018 if (skb_padto(skb, 36))
1019 return NULL;
1020 skb->len = 36;
1021 }
1022
1023 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1024 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1025 if (!skb)
1026 return NULL;
1027 }
1028
1029 return skb;
1030}
1031
/* ndo_start_xmit: apply TX workarounds, build WRBs, ring the doorbell.
 * Always returns NETDEV_TX_OK — failed packets are dropped and counted
 * in tx_drv_drops rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* hdr-WRB slot; key for sent_skb_list */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workarounds consumed/dropped the skb */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: queue head was rewound by
		 * make_tx_wrbs(); drop the packet
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1080
1081static int be_change_mtu(struct net_device *netdev, int new_mtu)
1082{
1083 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301084 struct device *dev = &adapter->pdev->dev;
1085
1086 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1087 dev_info(dev, "MTU must be between %d and %d bytes\n",
1088 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001089 return -EINVAL;
1090 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301091
1092 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301093 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094 netdev->mtu = new_mtu;
1095 return 0;
1096}
1097
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 *
 * Programs the currently-added vids into the HW VLAN filter; falls back
 * to (or recovers from) VLAN-promiscuous mode as needed. Returns the
 * status of the last FW command issued.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	/* more vids than the HW filter holds: must use promisc mode */
	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already promiscuous: nothing to reprogram */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1154
Patrick McHardy80d5c362013-04-19 02:04:28 +00001155static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001156{
1157 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001158 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001159
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001160 /* Packets with VID 0 are always received by Lancer by default */
1161 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301162 return status;
1163
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301164 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301165 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001166
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301167 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301168 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001169
Somnath Kotura6b74e02014-01-21 15:50:55 +05301170 status = be_vid_config(adapter);
1171 if (status) {
1172 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301173 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301174 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301175
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001176 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177}
1178
Patrick McHardy80d5c362013-04-19 02:04:28 +00001179static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001180{
1181 struct be_adapter *adapter = netdev_priv(netdev);
1182
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001183 /* Packets with VID 0 are always received by Lancer by default */
1184 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301185 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001186
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301187 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301188 adapter->vlans_added--;
1189
1190 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001191}
1192
Somnath kotur7ad09452014-03-03 14:24:43 +05301193static void be_clear_promisc(struct be_adapter *adapter)
1194{
1195 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301196 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301197
1198 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1199}
1200
/* ndo_set_rx_mode: sync the netdev's promisc/allmulti flags and the
 * UC/MC address lists to the adapter's RX filter, falling back to
 * promiscuous modes when the lists exceed what the HW supports.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* restore the VLAN filter that promisc mode bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		/* flush all secondary UC MACs before re-adding the list */
		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC addrs for HW filtering: go fully promisc */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC list programmed OK: MCAST promisc no longer needed */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1267
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001268static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1269{
1270 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001271 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001272 int status;
1273
Sathya Perla11ac75e2011-12-13 00:58:50 +00001274 if (!sriov_enabled(adapter))
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001275 return -EPERM;
1276
Sathya Perla11ac75e2011-12-13 00:58:50 +00001277 if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001278 return -EINVAL;
1279
Vasundhara Volam3c31aaf2014-08-01 17:47:31 +05301280 /* Proceed further only if user provided MAC is different
1281 * from active MAC
1282 */
1283 if (ether_addr_equal(mac, vf_cfg->mac_addr))
1284 return 0;
1285
Sathya Perla3175d8c2013-07-23 15:25:03 +05301286 if (BEx_chip(adapter)) {
1287 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1288 vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001289
Sathya Perla11ac75e2011-12-13 00:58:50 +00001290 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1291 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05301292 } else {
1293 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1294 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00001295 }
1296
Kalesh APabccf232014-07-17 16:20:24 +05301297 if (status) {
1298 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1299 mac, vf, status);
1300 return be_cmd_status(status);
1301 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001302
Kalesh APabccf232014-07-17 16:20:24 +05301303 ether_addr_copy(vf_cfg->mac_addr, mac);
1304
1305 return 0;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001306}
1307
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001308static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301309 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001310{
1311 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001312 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001313
Sathya Perla11ac75e2011-12-13 00:58:50 +00001314 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001315 return -EPERM;
1316
Sathya Perla11ac75e2011-12-13 00:58:50 +00001317 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001318 return -EINVAL;
1319
1320 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001321 vi->max_tx_rate = vf_cfg->tx_rate;
1322 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001323 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1324 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001325 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301326 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001327
1328 return 0;
1329}
1330
Sathya Perla748b5392014-05-09 13:29:13 +05301331static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001332{
1333 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001334 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001335 int status = 0;
1336
Sathya Perla11ac75e2011-12-13 00:58:50 +00001337 if (!sriov_enabled(adapter))
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001338 return -EPERM;
1339
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001340 if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001341 return -EINVAL;
1342
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001343 if (vlan || qos) {
1344 vlan |= qos << VLAN_PRIO_SHIFT;
Somnath Koturc5022242014-03-03 14:24:20 +05301345 if (vf_cfg->vlan_tag != vlan)
Ajit Khapardeb9fc0e532013-09-27 15:18:46 -05001346 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1347 vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001348 } else {
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00001349 /* Reset Transparent Vlan Tagging. */
Somnath Koturc5022242014-03-03 14:24:20 +05301350 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1351 vf + 1, vf_cfg->if_handle, 0);
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001352 }
1353
Kalesh APabccf232014-07-17 16:20:24 +05301354 if (status) {
1355 dev_err(&adapter->pdev->dev,
1356 "VLAN %d config on VF %d failed : %#x\n", vlan,
1357 vf, status);
1358 return be_cmd_status(status);
1359 }
1360
1361 vf_cfg->vlan_tag = vlan;
1362
1363 return 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001364}
1365
/* ndo_set_vf_rate handler: program a maximum TX rate (Mbps) for VF @vf.
 * min_tx_rate is not supported and must be 0.  A max_tx_rate of 0 is
 * passed straight to the FW QOS command (presumably clearing the limit
 * -- see be_cmd_config_qos).  Returns 0 or a -ve errno.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* Rate of 0: skip all validation and program the FW directly */
	if (!max_tx_rate)
		goto config_qos;

	/* The requested rate is validated against the current link speed */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the value so be_get_vf_config() can report it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301427
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301428static int be_set_vf_link_state(struct net_device *netdev, int vf,
1429 int link_state)
1430{
1431 struct be_adapter *adapter = netdev_priv(netdev);
1432 int status;
1433
1434 if (!sriov_enabled(adapter))
1435 return -EPERM;
1436
1437 if (vf >= adapter->num_vfs)
1438 return -EINVAL;
1439
1440 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301441 if (status) {
1442 dev_err(&adapter->pdev->dev,
1443 "Link state change on VF %d failed: %#x\n", vf, status);
1444 return be_cmd_status(status);
1445 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301446
Kalesh APabccf232014-07-17 16:20:24 +05301447 adapter->vf_cfg[vf].plink_tracking = link_state;
1448
1449 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301450}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001451
Sathya Perla2632baf2013-10-01 16:00:00 +05301452static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1453 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454{
Sathya Perla2632baf2013-10-01 16:00:00 +05301455 aic->rx_pkts_prev = rx_pkts;
1456 aic->tx_reqs_prev = tx_pkts;
1457 aic->jiffies = now;
1458}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001459
Sathya Perla2632baf2013-10-01 16:00:00 +05301460static void be_eqd_update(struct be_adapter *adapter)
1461{
1462 struct be_set_eqd set_eqd[MAX_EVT_QS];
1463 int eqd, i, num = 0, start;
1464 struct be_aic_obj *aic;
1465 struct be_eq_obj *eqo;
1466 struct be_rx_obj *rxo;
1467 struct be_tx_obj *txo;
1468 u64 rx_pkts, tx_pkts;
1469 ulong now;
1470 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001471
Sathya Perla2632baf2013-10-01 16:00:00 +05301472 for_all_evt_queues(adapter, eqo, i) {
1473 aic = &adapter->aic_obj[eqo->idx];
1474 if (!aic->enable) {
1475 if (aic->jiffies)
1476 aic->jiffies = 0;
1477 eqd = aic->et_eqd;
1478 goto modify_eqd;
1479 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001480
Sathya Perla2632baf2013-10-01 16:00:00 +05301481 rxo = &adapter->rx_obj[eqo->idx];
1482 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001483 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301484 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001485 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001486
Sathya Perla2632baf2013-10-01 16:00:00 +05301487 txo = &adapter->tx_obj[eqo->idx];
1488 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001489 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301490 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001491 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001492
Sathya Perla2632baf2013-10-01 16:00:00 +05301493 /* Skip, if wrapped around or first calculation */
1494 now = jiffies;
1495 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1496 rx_pkts < aic->rx_pkts_prev ||
1497 tx_pkts < aic->tx_reqs_prev) {
1498 be_aic_update(aic, rx_pkts, tx_pkts, now);
1499 continue;
1500 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001501
Sathya Perla2632baf2013-10-01 16:00:00 +05301502 delta = jiffies_to_msecs(now - aic->jiffies);
1503 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1504 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1505 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001506
Sathya Perla2632baf2013-10-01 16:00:00 +05301507 if (eqd < 8)
1508 eqd = 0;
1509 eqd = min_t(u32, eqd, aic->max_eqd);
1510 eqd = max_t(u32, eqd, aic->min_eqd);
1511
1512 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001513modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301514 if (eqd != aic->prev_eqd) {
1515 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1516 set_eqd[num].eq_id = eqo->q.id;
1517 aic->prev_eqd = eqd;
1518 num++;
1519 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001520 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301521
1522 if (num)
1523 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001524}
1525
Sathya Perla3abcded2010-10-03 22:12:27 -07001526static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301527 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001528{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001529 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001530
Sathya Perlaab1594e2011-07-25 19:10:15 +00001531 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001532 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001533 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001534 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001535 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001536 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001537 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001538 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001539 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001540}
1541
Sathya Perla2e588f82011-03-11 02:49:26 +00001542static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001543{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001544 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301545 * Also ignore ipcksm for ipv6 pkts
1546 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001547 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301548 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001549}
1550
/* Pop the RX buffer at the queue tail and make its data CPU-visible.
 * When this is the page's last fragment, the whole mapping is
 * DMA-unmapped; otherwise only this fragment's rx_frag_size window is
 * synced.  The caller takes over the page reference (and put_page()s it
 * or hands it to an skb).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* Last fragment carved from this page: unmap it all */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* More fragments follow: sync only this one for the CPU */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Consume the tail entry of the RX queue */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1576
1577/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001578static void be_rx_compl_discard(struct be_rx_obj *rxo,
1579 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001580{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001582 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001583
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001584 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301585 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001586 put_page(page_info->page);
1587 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001588 }
1589}
1590
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp: the first fragment (or the whole tiny packet)
 * is copied into the skb linear area, and remaining fragments are
 * attached as page frags, coalescing consecutive frags that share a
 * physical page into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Pull only the Ethernet header into the linear area;
		 * the rest of the first fragment stays as a page frag
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* page ref now owned by the skb (or freed) */

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1665
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the RX fragments, set checksum/hash/
 * vlan metadata and hand it to the stack via netif_receive_skb().
 * On skb allocation failure the fragments are discarded and a drop
 * counter is bumped.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when csum_passed() says it is valid */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1701
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX page fragments directly to a napi-provided skb
 * (coalescing frags that share a physical page), set metadata and pass
 * it up through napi_gro_frags().  Falls back to discarding the
 * fragments if no skb is available.
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page as frag j: drop the extra reference */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1759
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001760static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1761 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001762{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301763 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1764 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1765 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1766 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1767 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1768 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1769 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1770 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1771 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1772 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1773 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001774 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301775 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1776 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001777 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301778 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301779 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301780 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001781}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001782
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001783static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1784 struct be_rx_compl_info *rxcp)
Sathya Perla2e588f82011-03-11 02:49:26 +00001785{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301786 rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
1787 rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
1788 rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
1789 rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
1790 rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
1791 rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
1792 rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
1793 rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
1794 rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
1795 rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
1796 rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001797 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301798 rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
1799 rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001800 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301801 rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
1802 rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001803}
1804
/* Fetch and parse the next RX completion from @rxo's completion queue.
 * Returns NULL when no new (valid) completion is present.  The parsed
 * result lives in the per-rxo rxcp scratch struct, so it is only valid
 * until the next call.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Do not read the rest of the completion before the valid bit
	 * has been observed set
	 */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum is not valid for IP fragments */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the pvid tag from the stack unless it was
		 * explicitly configured by the user
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1849
Eric Dumazet1829b082011-03-01 05:48:12 +00001850static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001853
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001854 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001855 gfp |= __GFP_COMP;
1856 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001857}
1858
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE. At most 'frags_needed' fragments are posted, and
 * only into RXQ slots whose page_info entry is currently free.
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* start a new "big page" and DMA-map it once;
			 * it is then carved into rx_frag_size fragments
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* each additional fragment takes its own page ref */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* write the fragment's DMA address into the RX descriptor */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			/* last frag of the page: record the full-page DMA
			 * address for the eventual unmap
			 */
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* ring the RXQ doorbell in chunks of at most 256 buffers */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1941
/* Fetch the next valid TX completion from the TX CQ tail and consume it.
 * Returns NULL when no valid entry is currently present at the CQ tail.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* read the rest of the entry only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* reset the valid bit so this entry is not processed again */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1957
/* Unmap and free the skb whose wrbs run from the current TXQ tail through
 * last_index. The header wrb is skipped (not unmapped separately) but is
 * accounted for in the return value.
 *
 * Returns the number of wrbs consumed, including the header wrb.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* the skb head is unmapped only once, along with the first
		 * fragment wrb, and only if the skb has linear data
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1989
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001990/* Return the number of events in the event queue */
1991static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001992{
1993 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001994 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001995
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001996 do {
1997 eqe = queue_tail_node(&eqo->q);
1998 if (eqe->evt == 0)
1999 break;
2000
2001 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002002 eqe->evt = 0;
2003 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002004 queue_tail_inc(&eqo->q);
2005 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002006
2007 return num;
2008}
2009
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002010/* Leaves the EQ is disarmed state */
2011static void be_eq_clean(struct be_eq_obj *eqo)
2012{
2013 int num = events_get(eqo);
2014
2015 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2016}
2017
/* Drain all pending RX completions (waiting for the HW flush completion
 * on non-Lancer chips) and then free every posted RX buffer that was
 * never consumed, leaving the RXQ empty and the CQ unarmed.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* zero num_rcvd identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2067
/* Reap all outstanding TX completions across every TX queue, then free
 * any posted skbs for which completions will never arrive. Polling stops
 * once all queues are empty, after ~10ms without progress, or on HW error.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute this skb's wrb span so it can be
			 * released as if a completion had arrived
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2125
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002126static void be_evt_queues_destroy(struct be_adapter *adapter)
2127{
2128 struct be_eq_obj *eqo;
2129 int i;
2130
2131 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002132 if (eqo->q.created) {
2133 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002134 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302135 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302136 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002137 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002138 be_queue_free(adapter, &eqo->q);
2139 }
2140}
2141
/* Allocate and create one event queue per interrupt vector (capped by
 * cfg_num_qs), registering a NAPI context for each.
 * Returns 0 on success or the first non-zero status from allocation or
 * EQ creation.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* adaptive interrupt coalescing starts enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2174
Sathya Perla5fb379e2009-06-18 00:02:59 +00002175static void be_mcc_queues_destroy(struct be_adapter *adapter)
2176{
2177 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002178
Sathya Perla8788fdc2009-07-27 22:52:03 +00002179 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002180 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002181 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002182 be_queue_free(adapter, q);
2183
Sathya Perla8788fdc2009-07-27 22:52:03 +00002184 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002185 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002186 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002187 be_queue_free(adapter, q);
2188}
2189
/* Must be called only after TX qs are created as MCC shares TX EQ.
 *
 * Creates the MCC completion queue and the MCC queue itself.
 * Returns 0 on success, -1 on any failure; resources acquired before the
 * failing step are unwound via the goto chain below.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* unwind in reverse order of setup */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2222
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002223static void be_tx_queues_destroy(struct be_adapter *adapter)
2224{
2225 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002226 struct be_tx_obj *txo;
2227 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228
Sathya Perla3c8def92011-06-12 20:01:58 +00002229 for_all_tx_queues(adapter, txo, i) {
2230 q = &txo->q;
2231 if (q->created)
2232 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2233 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234
Sathya Perla3c8def92011-06-12 20:01:58 +00002235 q = &txo->cq;
2236 if (q->created)
2237 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2238 be_queue_free(adapter, q);
2239 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240}
2241
/* Allocate and create every TX queue and its completion queue.
 * Returns 0 on success or the first non-zero status from a failed step.
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2282
2283static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284{
2285 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002286 struct be_rx_obj *rxo;
2287 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288
Sathya Perla3abcded2010-10-03 22:12:27 -07002289 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002290 q = &rxo->cq;
2291 if (q->created)
2292 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2293 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002294 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295}
2296
/* Allocate and create a completion queue for each RX ring. One RX ring
 * is created per EQ; an extra default RXQ is added when RSS will be used
 * (i.e. when more than one ring is available).
 * Returns 0 on success or the first non-zero status on failure.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* CQs share EQs when there are more RXQs than EQs */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2333
/* INTx interrupt handler: counts pending events, schedules NAPI (if not
 * already scheduled) and notifies the EQ. Tracks spurious interrupts so
 * that repeated ones can be reported as IRQ_NONE.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2365
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002366static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002368 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002369
Sathya Perla0b545a62012-11-23 00:27:18 +00002370 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2371 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002372 return IRQ_HANDLED;
2373}
2374
Sathya Perla2e588f82011-03-11 02:49:26 +00002375static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002376{
Somnath Koture38b1702013-05-29 22:55:56 +00002377 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002378}
2379
/* NAPI RX processing: consume up to 'budget' completions from the RX CQ,
 * handing each packet to GRO or the regular receive path. 'polling'
 * indicates busy-poll context, in which case GRO is skipped.
 * Replenishes the RXQ when it falls below the refill watermark.
 * Returns the number of completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2439
Kalesh AP512bb8a2014-09-02 09:56:49 +05302440static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2441{
2442 switch (status) {
2443 case BE_TX_COMP_HDR_PARSE_ERR:
2444 tx_stats(txo)->tx_hdr_parse_err++;
2445 break;
2446 case BE_TX_COMP_NDMA_ERR:
2447 tx_stats(txo)->tx_dma_err++;
2448 break;
2449 case BE_TX_COMP_ACL_ERR:
2450 tx_stats(txo)->tx_spoof_check_err++;
2451 break;
2452 }
2453}
2454
2455static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2456{
2457 switch (status) {
2458 case LANCER_TX_COMP_LSO_ERR:
2459 tx_stats(txo)->tx_tso_err++;
2460 break;
2461 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2462 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2463 tx_stats(txo)->tx_spoof_check_err++;
2464 break;
2465 case LANCER_TX_COMP_QINQ_ERR:
2466 tx_stats(txo)->tx_qinq_err++;
2467 break;
2468 case LANCER_TX_COMP_PARITY_ERR:
2469 tx_stats(txo)->tx_internal_parity_err++;
2470 break;
2471 case LANCER_TX_COMP_DMA_ERR:
2472 tx_stats(txo)->tx_dma_err++;
2473 break;
2474 }
2475}
2476
/* Reap all available TX completions for one TX queue: record error
 * stats, release the completed wrbs/skbs and wake the netdev subqueue
 * 'idx' once at least half the TXQ slots are free again.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			/* error status encoding differs per chip family */
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002515
/* NAPI poll handler for one EQ: reaps TX completions on all TX queues of
 * the EQ, processes RX up to @budget, services the MCC queue on the MCC EQ,
 * and re-arms the EQ only when the budget was not exhausted.
 * Returns the amount of RX work done (NAPI contract).
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* Count EQ entries now; they are acked in the be_eq_notify() below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll owns the RX queues; pretend budget was consumed
		 * so NAPI keeps polling instead of re-arming the EQ
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ (true) and ack the counted events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2555
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency (busy-poll) RX handler for one EQ.
 * Returns LL_FLUSH_BUSY if NAPI currently owns the queues; otherwise the
 * amount of RX work done on the first queue that had any (budget of 4).
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	/* NAPI context and busy-poll context are mutually exclusive */
	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2577
/* Poll the adapter for hardware/firmware error state.
 * Lancer chips report errors via the SLIPORT status registers; other chips
 * via the PCI-config UE (unrecoverable error) status words. On a detected
 * error the carrier is turned off; adapter->hw_error is set only where the
 * error is known to be fatal (Lancer always; UE only on Skyhawk, since BE
 * hardware can report spurious UEs).
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Error already latched earlier; nothing more to detect */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* Masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log the name of every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2653
Sathya Perla8d56ff12009-11-22 22:02:26 +00002654static void be_msix_disable(struct be_adapter *adapter)
2655{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002656 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002657 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002658 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302659 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002660 }
2661}
2662
/* Enable MSI-X on the adapter, requesting vectors for the NIC queues and
 * (when supported) RoCE. On allocation failure, a PF falls back to INTx
 * (returns 0), while a VF fails (returns the negative error) because INTx
 * is not supported in VFs.
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* May grant anywhere between MIN_MSIX_VECTORS and num_vec vectors */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	/* Split the granted vectors evenly between NIC and RoCE */
	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2706
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002707static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302708 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002709{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302710 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002711}
2712
/* Request one MSI-X IRQ per event queue, named "<netdev>-q<i>".
 * On failure, frees the IRQs already requested (walking the EQs backwards)
 * and disables MSI-X before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* Unwind: free only the IRQs that were successfully requested */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2736
2737static int be_irq_register(struct be_adapter *adapter)
2738{
2739 struct net_device *netdev = adapter->netdev;
2740 int status;
2741
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002742 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002743 status = be_msix_register(adapter);
2744 if (status == 0)
2745 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002746 /* INTx is not supported for VF */
2747 if (!be_physfn(adapter))
2748 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002749 }
2750
Sathya Perlae49cc342012-11-27 19:50:02 +00002751 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002752 netdev->irq = adapter->pdev->irq;
2753 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002754 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002755 if (status) {
2756 dev_err(&adapter->pdev->dev,
2757 "INTx request IRQ failed - err %d\n", status);
2758 return status;
2759 }
2760done:
2761 adapter->isr_registered = true;
2762 return 0;
2763}
2764
2765static void be_irq_unregister(struct be_adapter *adapter)
2766{
2767 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002768 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002769 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002770
2771 if (!adapter->isr_registered)
2772 return;
2773
2774 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002775 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002776 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777 goto done;
2778 }
2779
2780 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 for_all_evt_queues(adapter, eqo, i)
2782 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002783
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002784done:
2785 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002786}
2787
/* Tear down all RX queues: destroy each created RXQ in FW, flush its
 * completion queue, then free the queue memory. Queue memory is freed even
 * for RXQs that were allocated but never created in FW.
 */
static void be_rx_qs_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			/* FW destroy first, then drain the CQ of
			 * any completions for this queue
			 */
			be_cmd_rxq_destroy(adapter, q);
			be_rx_cq_clean(rxo);
		}
		be_queue_free(adapter, q);
	}
}
2803
/* ndo_stop handler: quiesce the adapter in dependency order — RoCE, NAPI,
 * async MCC, TX drain, RX queues, extra unicast MACs, per-EQ IRQ sync and
 * EQ flush, and finally IRQ unregistration.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Delete the additional unicast MACs (index 0 is the primary MAC,
	 * which is kept)
	 */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no in-flight interrupt handler still references an EQ
	 * before flushing it
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2853
/* Allocate and create all RX queues, program the RSS indirection table and
 * hash key (or disable RSS when only the default RXQ exists), and post the
 * initial receive buffers. Returns 0 or a negative/FW error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by cycling through the RSS
		 * queues' ids until all RSS_INDIR_TABLE_LEN slots are set
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* Program a random RSS hash key; kept in rss_info for ethtool */
	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
2919
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002920static int be_open(struct net_device *netdev)
2921{
2922 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002923 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002924 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002925 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002926 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002927 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002928
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002929 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002930 if (status)
2931 goto err;
2932
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002933 status = be_irq_register(adapter);
2934 if (status)
2935 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002936
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002937 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002938 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002939
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002940 for_all_tx_queues(adapter, txo, i)
2941 be_cq_notify(adapter, txo->cq.id, true, 0);
2942
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002943 be_async_mcc_enable(adapter);
2944
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002945 for_all_evt_queues(adapter, eqo, i) {
2946 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302947 be_enable_busy_poll(eqo);
Suresh Reddy4cad9f32014-07-11 14:03:01 +05302948 be_eq_notify(adapter, eqo->q.id, true, true, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002949 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002950 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002951
Sathya Perla323ff712012-09-28 04:39:43 +00002952 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002953 if (!status)
2954 be_link_status_update(adapter, link_status);
2955
Sathya Perlafba87552013-05-08 02:05:50 +00002956 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002957 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05302958
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302959#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05302960 if (skyhawk_chip(adapter))
2961 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302962#endif
2963
Sathya Perla889cd4b2010-05-30 23:33:45 +00002964 return 0;
2965err:
2966 be_close(adapter->netdev);
2967 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002968}
2969
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002970static int be_setup_wol(struct be_adapter *adapter, bool enable)
2971{
2972 struct be_dma_mem cmd;
2973 int status = 0;
2974 u8 mac[ETH_ALEN];
2975
2976 memset(mac, 0, ETH_ALEN);
2977
2978 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002979 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2980 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302981 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302982 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002983
2984 if (enable) {
2985 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302986 PCICFG_PM_CONTROL_OFFSET,
2987 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002988 if (status) {
2989 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002990 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002991 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2992 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002993 return status;
2994 }
2995 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302996 adapter->netdev->dev_addr,
2997 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002998 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2999 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3000 } else {
3001 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3002 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3003 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3004 }
3005
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003006 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003007 return status;
3008}
3009
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 *
 * Returns the status of the last FW command (0 if all VFs were programmed).
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses pmac_add; newer chips program the MAC directly */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* Next VF gets the next address in sequence */
		mac[5] += 1;
	}
	return status;
}
3045
Sathya Perla4c876612013-02-03 20:30:11 +00003046static int be_vfs_mac_query(struct be_adapter *adapter)
3047{
3048 int status, vf;
3049 u8 mac[ETH_ALEN];
3050 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003051
3052 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303053 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3054 mac, vf_cfg->if_handle,
3055 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003056 if (status)
3057 return status;
3058 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3059 }
3060 return 0;
3061}
3062
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003063static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003064{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003065 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003066 u32 vf;
3067
Sathya Perla257a3fe2013-06-14 15:54:51 +05303068 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003069 dev_warn(&adapter->pdev->dev,
3070 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003071 goto done;
3072 }
3073
Sathya Perlab4c1df92013-05-08 02:05:47 +00003074 pci_disable_sriov(adapter->pdev);
3075
Sathya Perla11ac75e2011-12-13 00:58:50 +00003076 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303077 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003078 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3079 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303080 else
3081 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3082 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003083
Sathya Perla11ac75e2011-12-13 00:58:50 +00003084 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3085 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003086done:
3087 kfree(adapter->vf_cfg);
3088 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303089 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003090}
3091
/* Destroy all queues owned by the adapter. MCC, RX-CQ and TX queues are
 * torn down before the event queues they feed into.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3099
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303100static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003101{
Sathya Perla191eb752012-02-23 18:50:13 +00003102 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3103 cancel_delayed_work_sync(&adapter->work);
3104 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3105 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303106}
3107
Somnath Koturb05004a2013-12-05 12:08:16 +05303108static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303109{
3110 int i;
3111
Somnath Koturb05004a2013-12-05 12:08:16 +05303112 if (adapter->pmac_id) {
3113 for (i = 0; i < (adapter->uc_macs + 1); i++)
3114 be_cmd_pmac_del(adapter, adapter->if_handle,
3115 adapter->pmac_id[i], 0);
3116 adapter->uc_macs = 0;
3117
3118 kfree(adapter->pmac_id);
3119 adapter->pmac_id = NULL;
3120 }
3121}
3122
#ifdef CONFIG_BE2NET_VXLAN
/* Undo VxLAN offload configuration: revert the interface to normal
 * (non-tunnel) mode, clear the offloaded UDP port in FW, and reset the
 * driver's offload state.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303137
/* Undo be_setup(): stop the worker, tear down SR-IOV, VxLAN offloads, MACs,
 * the interface, all queues and MSI-X, and clear the setup-done flag.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3166
Sathya Perla4c876612013-02-03 20:30:11 +00003167static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003168{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303169 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003170 struct be_vf_cfg *vf_cfg;
3171 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003172 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003173
Sathya Perla4c876612013-02-03 20:30:11 +00003174 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3175 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003176
Sathya Perla4c876612013-02-03 20:30:11 +00003177 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303178 if (!BE3_chip(adapter)) {
3179 status = be_cmd_get_profile_config(adapter, &res,
3180 vf + 1);
3181 if (!status)
3182 cap_flags = res.if_cap_flags;
3183 }
Sathya Perla4c876612013-02-03 20:30:11 +00003184
3185 /* If a FW profile exists, then cap_flags are updated */
3186 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
Sathya Perla748b5392014-05-09 13:29:13 +05303187 BE_IF_FLAGS_BROADCAST |
3188 BE_IF_FLAGS_MULTICAST);
3189 status =
3190 be_cmd_if_create(adapter, cap_flags, en_flags,
3191 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003192 if (status)
3193 goto err;
3194 }
3195err:
3196 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003197}
3198
Sathya Perla39f1d942012-05-08 19:41:24 +00003199static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003200{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003201 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003202 int vf;
3203
Sathya Perla39f1d942012-05-08 19:41:24 +00003204 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3205 GFP_KERNEL);
3206 if (!adapter->vf_cfg)
3207 return -ENOMEM;
3208
Sathya Perla11ac75e2011-12-13 00:58:50 +00003209 for_all_vfs(adapter, vf_cfg, vf) {
3210 vf_cfg->if_handle = -1;
3211 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003212 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003213 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003214}
3215
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003216static int be_vf_setup(struct be_adapter *adapter)
3217{
Sathya Perla4c876612013-02-03 20:30:11 +00003218 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303219 struct be_vf_cfg *vf_cfg;
3220 int status, old_vfs, vf;
Sathya Perla04a06022013-07-23 15:25:00 +05303221 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003222
Sathya Perla257a3fe2013-06-14 15:54:51 +05303223 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003224
3225 status = be_vf_setup_init(adapter);
3226 if (status)
3227 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003228
Sathya Perla4c876612013-02-03 20:30:11 +00003229 if (old_vfs) {
3230 for_all_vfs(adapter, vf_cfg, vf) {
3231 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3232 if (status)
3233 goto err;
3234 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003235
Sathya Perla4c876612013-02-03 20:30:11 +00003236 status = be_vfs_mac_query(adapter);
3237 if (status)
3238 goto err;
3239 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303240 status = be_vfs_if_create(adapter);
3241 if (status)
3242 goto err;
3243
Sathya Perla39f1d942012-05-08 19:41:24 +00003244 status = be_vf_eth_addr_config(adapter);
3245 if (status)
3246 goto err;
3247 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003248
Sathya Perla11ac75e2011-12-13 00:58:50 +00003249 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303250 /* Allow VFs to programs MAC/VLAN filters */
3251 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3252 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3253 status = be_cmd_set_fn_privileges(adapter,
3254 privileges |
3255 BE_PRIV_FILTMGMT,
3256 vf + 1);
3257 if (!status)
3258 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3259 vf);
3260 }
3261
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303262 /* Allow full available bandwidth */
3263 if (!old_vfs)
3264 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003265
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303266 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303267 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303268 be_cmd_set_logical_link_config(adapter,
3269 IFLA_VF_LINK_STATE_AUTO,
3270 vf+1);
3271 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003272 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003273
3274 if (!old_vfs) {
3275 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3276 if (status) {
3277 dev_err(dev, "SRIOV enable failed\n");
3278 adapter->num_vfs = 0;
3279 goto err;
3280 }
3281 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303282
3283 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003284 return 0;
3285err:
Sathya Perla4c876612013-02-03 20:30:11 +00003286 dev_err(dev, "VF setup failed\n");
3287 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003288 return status;
3289}
3290
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303291/* Converting function_mode bits on BE3 to SH mc_type enums */
3292
3293static u8 be_convert_mc_type(u32 function_mode)
3294{
Suresh Reddy66064db2014-06-23 16:41:29 +05303295 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303296 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303297 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303298 return FLEX10;
3299 else if (function_mode & VNIC_MODE)
3300 return vNIC2;
3301 else if (function_mode & UMC_ENABLED)
3302 return UMC;
3303 else
3304 return MC_NONE;
3305}
3306
Sathya Perla92bf14a2013-08-27 16:57:32 +05303307/* On BE2/BE3 FW does not suggest the supported limits */
3308static void BEx_get_resources(struct be_adapter *adapter,
3309 struct be_resources *res)
3310{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303311 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303312
3313 if (be_physfn(adapter))
3314 res->max_uc_mac = BE_UC_PMAC_COUNT;
3315 else
3316 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3317
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303318 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3319
3320 if (be_is_mc(adapter)) {
3321 /* Assuming that there are 4 channels per port,
3322 * when multi-channel is enabled
3323 */
3324 if (be_is_qnq_mode(adapter))
3325 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3326 else
3327 /* In a non-qnq multichannel mode, the pvid
3328 * takes up one vlan entry
3329 */
3330 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3331 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303332 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303333 }
3334
Sathya Perla92bf14a2013-08-27 16:57:32 +05303335 res->max_mcast_mac = BE_MAX_MC;
3336
Vasundhara Volama5243da2014-03-11 18:53:07 +05303337 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3338 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3339 * *only* if it is RSS-capable.
3340 */
3341 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3342 !be_physfn(adapter) || (be_is_mc(adapter) &&
Suresh Reddya28277d2014-09-02 09:56:57 +05303343 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303344 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05303345 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3346 struct be_resources super_nic_res = {0};
3347
3348 /* On a SuperNIC profile, the driver needs to use the
3349 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3350 */
3351 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3352 /* Some old versions of BE3 FW don't report max_tx_qs value */
3353 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3354 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303355 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05303356 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303357
3358 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3359 !use_sriov && be_physfn(adapter))
3360 res->max_rss_qs = (adapter->be3_native) ?
3361 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3362 res->max_rx_qs = res->max_rss_qs + 1;
3363
Suresh Reddye3dc8672014-01-06 13:02:25 +05303364 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05303365 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05303366 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3367 else
3368 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303369
3370 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3371 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3372 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3373}
3374
Sathya Perla30128032011-11-10 19:17:57 +00003375static void be_setup_init(struct be_adapter *adapter)
3376{
3377 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003378 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003379 adapter->if_handle = -1;
3380 adapter->be3_native = false;
3381 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003382 if (be_physfn(adapter))
3383 adapter->cmd_privileges = MAX_PRIVILEGES;
3384 else
3385 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003386}
3387
Vasundhara Volambec84e62014-06-30 13:01:32 +05303388static int be_get_sriov_config(struct be_adapter *adapter)
3389{
3390 struct device *dev = &adapter->pdev->dev;
3391 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303392 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303393
3394 /* Some old versions of BE3 FW don't report max_vfs value */
Sathya Perlad3d18312014-08-01 17:47:30 +05303395 be_cmd_get_profile_config(adapter, &res, 0);
3396
Vasundhara Volambec84e62014-06-30 13:01:32 +05303397 if (BE3_chip(adapter) && !res.max_vfs) {
3398 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3399 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3400 }
3401
Sathya Perlad3d18312014-08-01 17:47:30 +05303402 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303403
3404 if (!be_max_vfs(adapter)) {
3405 if (num_vfs)
Vasundhara Volam50762662014-09-12 17:39:14 +05303406 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
Vasundhara Volambec84e62014-06-30 13:01:32 +05303407 adapter->num_vfs = 0;
3408 return 0;
3409 }
3410
Sathya Perlad3d18312014-08-01 17:47:30 +05303411 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3412
Vasundhara Volambec84e62014-06-30 13:01:32 +05303413 /* validate num_vfs module param */
3414 old_vfs = pci_num_vf(adapter->pdev);
3415 if (old_vfs) {
3416 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3417 if (old_vfs != num_vfs)
3418 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3419 adapter->num_vfs = old_vfs;
3420 } else {
3421 if (num_vfs > be_max_vfs(adapter)) {
3422 dev_info(dev, "Resources unavailable to init %d VFs\n",
3423 num_vfs);
3424 dev_info(dev, "Limiting to %d VFs\n",
3425 be_max_vfs(adapter));
3426 }
3427 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3428 }
3429
3430 return 0;
3431}
3432
Sathya Perla92bf14a2013-08-27 16:57:32 +05303433static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003434{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303435 struct device *dev = &adapter->pdev->dev;
3436 struct be_resources res = {0};
3437 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003438
Sathya Perla92bf14a2013-08-27 16:57:32 +05303439 if (BEx_chip(adapter)) {
3440 BEx_get_resources(adapter, &res);
3441 adapter->res = res;
3442 }
3443
Sathya Perla92bf14a2013-08-27 16:57:32 +05303444 /* For Lancer, SH etc read per-function resource limits from FW.
3445 * GET_FUNC_CONFIG returns per function guaranteed limits.
3446 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3447 */
Sathya Perla4c876612013-02-03 20:30:11 +00003448 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303449 status = be_cmd_get_func_config(adapter, &res);
3450 if (status)
3451 return status;
3452
3453 /* If RoCE may be enabled stash away half the EQs for RoCE */
3454 if (be_roce_supported(adapter))
3455 res.max_evt_qs /= 2;
3456 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003457 }
3458
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303459 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3460 be_max_txqs(adapter), be_max_rxqs(adapter),
3461 be_max_rss(adapter), be_max_eqs(adapter),
3462 be_max_vfs(adapter));
3463 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3464 be_max_uc(adapter), be_max_mc(adapter),
3465 be_max_vlans(adapter));
3466
Sathya Perla92bf14a2013-08-27 16:57:32 +05303467 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003468}
3469
Sathya Perlad3d18312014-08-01 17:47:30 +05303470static void be_sriov_config(struct be_adapter *adapter)
3471{
3472 struct device *dev = &adapter->pdev->dev;
3473 int status;
3474
3475 status = be_get_sriov_config(adapter);
3476 if (status) {
3477 dev_err(dev, "Failed to query SR-IOV configuration\n");
3478 dev_err(dev, "SR-IOV cannot be enabled\n");
3479 return;
3480 }
3481
3482 /* When the HW is in SRIOV capable configuration, the PF-pool
3483 * resources are equally distributed across the max-number of
3484 * VFs. The user may request only a subset of the max-vfs to be
3485 * enabled. Based on num_vfs, redistribute the resources across
3486 * num_vfs so that each VF will have access to more number of
3487 * resources. This facility is not available in BE3 FW.
3488 * Also, this is done by FW in Lancer chip.
3489 */
3490 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3491 status = be_cmd_set_sriov_config(adapter,
3492 adapter->pool_res,
3493 adapter->num_vfs);
3494 if (status)
3495 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3496 }
3497}
3498
Sathya Perla39f1d942012-05-08 19:41:24 +00003499static int be_get_config(struct be_adapter *adapter)
3500{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303501 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003502 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003503
Kalesh APe97e3cd2014-07-17 16:20:26 +05303504 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003505 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303506 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003507
Vasundhara Volam542963b2014-01-15 13:23:33 +05303508 if (be_physfn(adapter)) {
3509 status = be_cmd_get_active_profile(adapter, &profile_id);
3510 if (!status)
3511 dev_info(&adapter->pdev->dev,
3512 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05303513 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303514
Sathya Perlad3d18312014-08-01 17:47:30 +05303515 if (!BE2_chip(adapter) && be_physfn(adapter))
3516 be_sriov_config(adapter);
Vasundhara Volam542963b2014-01-15 13:23:33 +05303517
Sathya Perla92bf14a2013-08-27 16:57:32 +05303518 status = be_get_resources(adapter);
3519 if (status)
3520 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003521
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05303522 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3523 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303524 if (!adapter->pmac_id)
3525 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003526
Sathya Perla92bf14a2013-08-27 16:57:32 +05303527 /* Sanitize cfg_num_qs based on HW and platform limits */
3528 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3529
3530 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003531}
3532
Sathya Perla95046b92013-07-23 15:25:02 +05303533static int be_mac_setup(struct be_adapter *adapter)
3534{
3535 u8 mac[ETH_ALEN];
3536 int status;
3537
3538 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3539 status = be_cmd_get_perm_mac(adapter, mac);
3540 if (status)
3541 return status;
3542
3543 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3544 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3545 } else {
3546 /* Maybe the HW was reset; dev_addr must be re-programmed */
3547 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3548 }
3549
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003550 /* For BE3-R VFs, the PF programs the initial MAC address */
3551 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3552 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3553 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303554 return 0;
3555}
3556
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303557static void be_schedule_worker(struct be_adapter *adapter)
3558{
3559 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3560 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3561}
3562
Sathya Perla77071332013-08-27 16:57:34 +05303563static int be_setup_queues(struct be_adapter *adapter)
3564{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303565 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303566 int status;
3567
3568 status = be_evt_queues_create(adapter);
3569 if (status)
3570 goto err;
3571
3572 status = be_tx_qs_create(adapter);
3573 if (status)
3574 goto err;
3575
3576 status = be_rx_cqs_create(adapter);
3577 if (status)
3578 goto err;
3579
3580 status = be_mcc_queues_create(adapter);
3581 if (status)
3582 goto err;
3583
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303584 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3585 if (status)
3586 goto err;
3587
3588 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3589 if (status)
3590 goto err;
3591
Sathya Perla77071332013-08-27 16:57:34 +05303592 return 0;
3593err:
3594 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3595 return status;
3596}
3597
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303598int be_update_queues(struct be_adapter *adapter)
3599{
3600 struct net_device *netdev = adapter->netdev;
3601 int status;
3602
3603 if (netif_running(netdev))
3604 be_close(netdev);
3605
3606 be_cancel_worker(adapter);
3607
3608 /* If any vectors have been shared with RoCE we cannot re-program
3609 * the MSIx table.
3610 */
3611 if (!adapter->num_msix_roce_vec)
3612 be_msix_disable(adapter);
3613
3614 be_clear_queues(adapter);
3615
3616 if (!msix_enabled(adapter)) {
3617 status = be_msix_enable(adapter);
3618 if (status)
3619 return status;
3620 }
3621
3622 status = be_setup_queues(adapter);
3623 if (status)
3624 return status;
3625
3626 be_schedule_worker(adapter);
3627
3628 if (netif_running(netdev))
3629 status = be_open(netdev);
3630
3631 return status;
3632}
3633
Sathya Perla5fb379e2009-06-18 00:02:59 +00003634static int be_setup(struct be_adapter *adapter)
3635{
Sathya Perla39f1d942012-05-08 19:41:24 +00003636 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303637 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003638 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003639
Sathya Perla30128032011-11-10 19:17:57 +00003640 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003641
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003642 if (!lancer_chip(adapter))
3643 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003644
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003645 status = be_get_config(adapter);
3646 if (status)
3647 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003648
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003649 status = be_msix_enable(adapter);
3650 if (status)
3651 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003652
Sathya Perla77071332013-08-27 16:57:34 +05303653 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3654 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3655 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3656 en_flags |= BE_IF_FLAGS_RSS;
3657 en_flags = en_flags & be_if_cap_flags(adapter);
3658 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3659 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003660 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003661 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003662
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303663 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3664 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303665 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303666 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003667 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003668 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003669
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003670 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003671
Sathya Perla95046b92013-07-23 15:25:02 +05303672 status = be_mac_setup(adapter);
3673 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003674 goto err;
3675
Kalesh APe97e3cd2014-07-17 16:20:26 +05303676 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303677 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003678
Somnath Koture9e2a902013-10-24 14:37:53 +05303679 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05303680 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05303681 adapter->fw_ver);
3682 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3683 }
3684
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003685 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003686 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003687
3688 be_set_rx_mode(adapter->netdev);
3689
Suresh Reddy76a9e082014-01-15 13:23:40 +05303690 be_cmd_get_acpi_wol_cap(adapter);
3691
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003692 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003693
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003694 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3695 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003696 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003697
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303698 if (be_physfn(adapter))
3699 be_cmd_set_logical_link_config(adapter,
3700 IFLA_VF_LINK_STATE_AUTO, 0);
3701
Vasundhara Volambec84e62014-06-30 13:01:32 +05303702 if (adapter->num_vfs)
3703 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003704
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003705 status = be_cmd_get_phy_info(adapter);
3706 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003707 adapter->phy.fc_autoneg = 1;
3708
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303709 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303710 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003711 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003712err:
3713 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003714 return status;
3715}
3716
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: notify every event queue and kick its NAPI context
 * so pending completions get processed without relying on interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3730
/* Cookie marking a flash_section_info header in a UFI image; compared as a
 * single sizeof(flash_cookie)-byte blob against fsec->cookie in
 * get_fsec_info(). Note the first element is shorter than 16 bytes and is
 * NUL-padded, so the padding bytes are part of the pattern being matched.
 */
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303731static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003732
Sathya Perla306f1342011-08-02 19:57:45 +00003733static bool phy_flashing_required(struct be_adapter *adapter)
3734{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003735 return (adapter->phy.phy_type == TN_8022 &&
3736 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003737}
3738
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003739static bool is_comp_in_ufi(struct be_adapter *adapter,
3740 struct flash_section_info *fsec, int type)
3741{
3742 int i = 0, img_type = 0;
3743 struct flash_section_info_g2 *fsec_g2 = NULL;
3744
Sathya Perlaca34fe32012-11-06 17:48:56 +00003745 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003746 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3747
3748 for (i = 0; i < MAX_FLASH_COMP; i++) {
3749 if (fsec_g2)
3750 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3751 else
3752 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3753
3754 if (img_type == type)
3755 return true;
3756 }
3757 return false;
3758
3759}
3760
Jingoo Han4188e7d2013-08-05 18:02:02 +09003761static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303762 int header_size,
3763 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003764{
3765 struct flash_section_info *fsec = NULL;
3766 const u8 *p = fw->data;
3767
3768 p += header_size;
3769 while (p < (fw->data + fw->size)) {
3770 fsec = (struct flash_section_info *)p;
3771 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3772 return fsec;
3773 p += 32;
3774 }
3775 return NULL;
3776}
3777
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303778static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3779 u32 img_offset, u32 img_size, int hdr_size,
3780 u16 img_optype, bool *crc_match)
3781{
3782 u32 crc_offset;
3783 int status;
3784 u8 crc[4];
3785
3786 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3787 if (status)
3788 return status;
3789
3790 crc_offset = hdr_size + img_offset + img_size - 4;
3791
3792 /* Skip flashing, if crc of flashed region matches */
3793 if (!memcmp(crc, p + crc_offset, 4))
3794 *crc_match = true;
3795 else
3796 *crc_match = false;
3797
3798 return status;
3799}
3800
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003801static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303802 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003803{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003804 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303805 u32 total_bytes, flash_op, num_bytes;
3806 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003807
3808 total_bytes = img_size;
3809 while (total_bytes) {
3810 num_bytes = min_t(u32, 32*1024, total_bytes);
3811
3812 total_bytes -= num_bytes;
3813
3814 if (!total_bytes) {
3815 if (optype == OPTYPE_PHY_FW)
3816 flash_op = FLASHROM_OPER_PHY_FLASH;
3817 else
3818 flash_op = FLASHROM_OPER_FLASH;
3819 } else {
3820 if (optype == OPTYPE_PHY_FW)
3821 flash_op = FLASHROM_OPER_PHY_SAVE;
3822 else
3823 flash_op = FLASHROM_OPER_SAVE;
3824 }
3825
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003826 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003827 img += num_bytes;
3828 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303829 flash_op, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05303830 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303831 optype == OPTYPE_PHY_FW)
3832 break;
3833 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003834 return status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003835 }
3836 return 0;
3837}
3838
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003839/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003840static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303841 const struct firmware *fw,
3842 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003843{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003844 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303845 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003846 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303847 int status, i, filehdr_size, num_comp;
3848 const struct flash_comp *pflashcomp;
3849 bool crc_match;
3850 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003851
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003852 struct flash_comp gen3_flash_types[] = {
3853 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3854 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3855 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3856 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3857 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3858 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3859 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3860 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3861 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3862 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3863 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3864 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3865 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3866 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3867 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3868 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3869 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3870 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3871 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3872 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003873 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003874
3875 struct flash_comp gen2_flash_types[] = {
3876 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3877 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3878 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3879 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3880 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3881 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3882 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3883 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3884 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3885 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3886 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3887 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3888 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3889 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3890 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3891 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003892 };
3893
Sathya Perlaca34fe32012-11-06 17:48:56 +00003894 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003895 pflashcomp = gen3_flash_types;
3896 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003897 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003898 } else {
3899 pflashcomp = gen2_flash_types;
3900 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003901 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003902 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003903
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003904 /* Get flash section info*/
3905 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3906 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303907 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003908 return -1;
3909 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003910 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003911 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003912 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003913
3914 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3915 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3916 continue;
3917
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003918 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3919 !phy_flashing_required(adapter))
3920 continue;
3921
3922 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303923 status = be_check_flash_crc(adapter, fw->data,
3924 pflashcomp[i].offset,
3925 pflashcomp[i].size,
3926 filehdr_size +
3927 img_hdrs_size,
3928 OPTYPE_REDBOOT, &crc_match);
3929 if (status) {
3930 dev_err(dev,
3931 "Could not get CRC for 0x%x region\n",
3932 pflashcomp[i].optype);
3933 continue;
3934 }
3935
3936 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003937 continue;
3938 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003939
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303940 p = fw->data + filehdr_size + pflashcomp[i].offset +
3941 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003942 if (p + pflashcomp[i].size > fw->data + fw->size)
3943 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003944
3945 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303946 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003947 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303948 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003949 pflashcomp[i].img_type);
3950 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003951 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003952 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003953 return 0;
3954}
3955
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303956static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3957{
3958 u32 img_type = le32_to_cpu(fsec_entry.type);
3959 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3960
3961 if (img_optype != 0xFFFF)
3962 return img_optype;
3963
3964 switch (img_type) {
3965 case IMAGE_FIRMWARE_iSCSI:
3966 img_optype = OPTYPE_ISCSI_ACTIVE;
3967 break;
3968 case IMAGE_BOOT_CODE:
3969 img_optype = OPTYPE_REDBOOT;
3970 break;
3971 case IMAGE_OPTION_ROM_ISCSI:
3972 img_optype = OPTYPE_BIOS;
3973 break;
3974 case IMAGE_OPTION_ROM_PXE:
3975 img_optype = OPTYPE_PXE_BIOS;
3976 break;
3977 case IMAGE_OPTION_ROM_FCoE:
3978 img_optype = OPTYPE_FCOE_BIOS;
3979 break;
3980 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3981 img_optype = OPTYPE_ISCSI_BACKUP;
3982 break;
3983 case IMAGE_NCSI:
3984 img_optype = OPTYPE_NCSI_FW;
3985 break;
3986 case IMAGE_FLASHISM_JUMPVECTOR:
3987 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3988 break;
3989 case IMAGE_FIRMWARE_PHY:
3990 img_optype = OPTYPE_SH_PHY_FW;
3991 break;
3992 case IMAGE_REDBOOT_DIR:
3993 img_optype = OPTYPE_REDBOOT_DIR;
3994 break;
3995 case IMAGE_REDBOOT_CONFIG:
3996 img_optype = OPTYPE_REDBOOT_CONFIG;
3997 break;
3998 case IMAGE_UFI_DIR:
3999 img_optype = OPTYPE_UFI_DIR;
4000 break;
4001 default:
4002 break;
4003 }
4004
4005 return img_optype;
4006}
4007
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004008static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304009 const struct firmware *fw,
4010 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004011{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004012 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304013 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004014 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304015 u32 img_offset, img_size, img_type;
4016 int status, i, filehdr_size;
4017 bool crc_match, old_fw_img;
4018 u16 img_optype;
4019 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004020
4021 filehdr_size = sizeof(struct flash_file_hdr_g3);
4022 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4023 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304024 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304025 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004026 }
4027
4028 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4029 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4030 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304031 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4032 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4033 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004034
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304035 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004036 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304037 /* Don't bother verifying CRC if an old FW image is being
4038 * flashed
4039 */
4040 if (old_fw_img)
4041 goto flash;
4042
4043 status = be_check_flash_crc(adapter, fw->data, img_offset,
4044 img_size, filehdr_size +
4045 img_hdrs_size, img_optype,
4046 &crc_match);
4047 /* The current FW image on the card does not recognize the new
4048 * FLASH op_type. The FW download is partially complete.
4049 * Reboot the server now to enable FW image to recognize the
4050 * new FLASH op_type. To complete the remaining process,
4051 * download the same FW again after the reboot.
4052 */
Kalesh AP4c600052014-05-30 19:06:26 +05304053 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4054 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304055 dev_err(dev, "Flash incomplete. Reset the server\n");
4056 dev_err(dev, "Download FW image again after reset\n");
4057 return -EAGAIN;
4058 } else if (status) {
4059 dev_err(dev, "Could not get CRC for 0x%x region\n",
4060 img_optype);
4061 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004062 }
4063
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304064 if (crc_match)
4065 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004066
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304067flash:
4068 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004069 if (p + img_size > fw->data + fw->size)
4070 return -1;
4071
4072 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304073 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4074 * UFI_DIR region
4075 */
Kalesh AP4c600052014-05-30 19:06:26 +05304076 if (old_fw_img &&
4077 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4078 (img_optype == OPTYPE_UFI_DIR &&
4079 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304080 continue;
4081 } else if (status) {
4082 dev_err(dev, "Flashing section type 0x%x failed\n",
4083 img_type);
4084 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004085 }
4086 }
4087 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004088}
4089
/* Download a firmware image to a Lancer adapter.
 *
 * The image is streamed to the FW object "/prg" in 32KB chunks through a
 * single reusable DMA buffer, then committed with a zero-length write.
 * Depending on the FW's reported change_status, the adapter is either
 * reset in-place to activate the new image or the user is told to reboot.
 *
 * Returns 0 on success, -EINVAL for a misaligned image, -ENOMEM on
 * allocation failure, or a be_cmd_status() errno on FW command failure.
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK	(32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION	"/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW expects the image length in whole 32-bit words */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One buffer holds the write_object request header plus one chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
			 + LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	/* Chunk payload lives immediately after the request header */
	dest_image_ptr = flash_cmd.va +
			 sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		/* Advance by what the FW actually accepted, which may be
		 * less than chunk_size.
		 */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written: a zero-length write at the final
		 * offset tells the FW the download is complete.
		 */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	if (change_status == LANCER_FW_RESET_NEEDED) {
		/* New FW can be activated immediately by a FW-level reset */
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		/* FW asked for activation it cannot do itself */
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	/* Flash succeeded even if the optional in-place reset did not */
	return 0;
}
4174
Sathya Perlaca34fe32012-11-06 17:48:56 +00004175#define UFI_TYPE2 2
4176#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004177#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004178#define UFI_TYPE4 4
4179static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004180 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004181{
Kalesh APddf11692014-07-17 16:20:28 +05304182 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004183 goto be_get_ufi_exit;
4184
Sathya Perlaca34fe32012-11-06 17:48:56 +00004185 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4186 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004187 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4188 if (fhdr->asic_type_rev == 0x10)
4189 return UFI_TYPE3R;
4190 else
4191 return UFI_TYPE3;
4192 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004193 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004194
4195be_get_ufi_exit:
4196 dev_err(&adapter->pdev->dev,
4197 "UFI and Interface are not compatible for flashing\n");
4198 return -1;
4199}
4200
/* Flash a UFI firmware image on a non-Lancer (BE2/BE3/Skyhawk) adapter.
 *
 * Determines the UFI generation from the file header, then dispatches to
 * the matching flash routine for each image header whose imageid is 1.
 * UFI_TYPE2 images carry no image headers and are flashed after the loop.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * incompatible UFI, or the status of the failing flash routine.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every WRITE_FLASHROM command */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* May return -1; handled after the loop below */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* imageid 1 marks the flashable firmware image */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen2 UFIs have no image headers: flash unconditionally */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4269
4270int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4271{
4272 const struct firmware *fw;
4273 int status;
4274
4275 if (!netif_running(adapter->netdev)) {
4276 dev_err(&adapter->pdev->dev,
4277 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304278 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004279 }
4280
4281 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4282 if (status)
4283 goto fw_exit;
4284
4285 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4286
4287 if (lancer_chip(adapter))
4288 status = lancer_fw_download(adapter, fw);
4289 else
4290 status = be_fw_download(adapter, fw);
4291
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004292 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304293 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004294
Ajit Khaparde84517482009-09-04 03:12:16 +00004295fw_exit:
4296 release_firmware(fw);
4297 return status;
4298}
4299
/* ndo_bridge_setlink: switch the embedded port between VEB and VEPA
 * forwarding modes via the IFLA_BRIDGE_MODE netlink attribute.
 * Only meaningful when SR-IOV is enabled. Returns 0 on success,
 * -EOPNOTSUPP/-EINVAL for unsupported or malformed requests, or the
 * FW command status.
 */
static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
	struct be_adapter *adapter = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem;
	int status = 0;
	u16 mode = 0;

	if (!sriov_enabled(adapter))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		/* Guard against a truncated attribute payload */
		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
			return -EINVAL;

		status = be_cmd_set_hsw_config(adapter, 0, 0,
					       adapter->if_handle,
					       mode == BRIDGE_MODE_VEPA ?
					       PORT_FWD_TYPE_VEPA :
					       PORT_FWD_TYPE_VEB);
		if (status)
			goto err;

		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

		/* Only the first IFLA_BRIDGE_MODE attribute is honored */
		return status;
	}
err:
	/* NOTE(review): also reached when no IFLA_BRIDGE_MODE attribute is
	 * present, in which case status is still 0 despite the error log.
	 */
	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");

	return status;
}
4345
4346static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304347 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004348{
4349 struct be_adapter *adapter = netdev_priv(dev);
4350 int status = 0;
4351 u8 hsw_mode;
4352
4353 if (!sriov_enabled(adapter))
4354 return 0;
4355
4356 /* BE and Lancer chips support VEB mode only */
4357 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4358 hsw_mode = PORT_FWD_TYPE_VEB;
4359 } else {
4360 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4361 adapter->if_handle, &hsw_mode);
4362 if (status)
4363 return 0;
4364 }
4365
4366 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4367 hsw_mode == PORT_FWD_TYPE_VEPA ?
4368 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4369}
4370
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304371#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304372static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4373 __be16 port)
4374{
4375 struct be_adapter *adapter = netdev_priv(netdev);
4376 struct device *dev = &adapter->pdev->dev;
4377 int status;
4378
4379 if (lancer_chip(adapter) || BEx_chip(adapter))
4380 return;
4381
4382 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4383 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4384 be16_to_cpu(port));
4385 dev_info(dev,
4386 "Only one UDP port supported for VxLAN offloads\n");
4387 return;
4388 }
4389
4390 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4391 OP_CONVERT_NORMAL_TO_TUNNEL);
4392 if (status) {
4393 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4394 goto err;
4395 }
4396
4397 status = be_cmd_set_vxlan_port(adapter, port);
4398 if (status) {
4399 dev_warn(dev, "Failed to add VxLAN port\n");
4400 goto err;
4401 }
4402 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4403 adapter->vxlan_port = port;
4404
4405 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4406 be16_to_cpu(port));
4407 return;
4408err:
4409 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304410}
4411
4412static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4413 __be16 port)
4414{
4415 struct be_adapter *adapter = netdev_priv(netdev);
4416
4417 if (lancer_chip(adapter) || BEx_chip(adapter))
4418 return;
4419
4420 if (adapter->vxlan_port != port)
4421 return;
4422
4423 be_disable_vxlan_offloads(adapter);
4424
4425 dev_info(&adapter->pdev->dev,
4426 "Disabled VxLAN offloads for UDP port %d\n",
4427 be16_to_cpu(port));
4428}
Joe Stringer725d5482014-11-13 16:38:13 -08004429
/* ndo_gso_check: defer to the vxlan helper to decide whether this GSO
 * skb's encapsulation can be offloaded by the HW.
 */
static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
{
	return vxlan_gso_check(skb);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304435
/* net_device_ops vtable shared by all be2net devices; installed on the
 * netdev in be_netdev_init(). SR-IOV, netpoll, busy-poll and VxLAN hooks
 * are compiled in only when the corresponding config options are set.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_rx_mode,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_get_stats64	= be_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config,
	.ndo_set_vf_link_state  = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= be_netpoll,
#endif
	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port	= be_add_vxlan_port,
	.ndo_del_vxlan_port	= be_del_vxlan_port,
	.ndo_gso_check		= be_gso_check,
#endif
};
4466
/* Populate the netdev's offload feature flags, ops and ethtool ops.
 * Called once during probe, before register_netdev().
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Only Skyhawk can offload checksums/TSO on encapsulated
	 * (VxLAN) traffic.
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX flow hashing is only useful with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Everything user-toggleable is also enabled by default, plus the
	 * VLAN RX features which are not user-toggleable here.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Keep the GSO payload within the 64KB the HW can segment */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4499
4500static void be_unmap_pci_bars(struct be_adapter *adapter)
4501{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004502 if (adapter->csr)
4503 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004504 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004505 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004506}
4507
/* BAR index of the doorbell region: BAR 0 on Lancer and on VFs,
 * BAR 4 on BE/Skyhawk PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4515
4516static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004517{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004518 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004519 adapter->roce_db.size = 4096;
4520 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4521 db_bar(adapter));
4522 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4523 db_bar(adapter));
4524 }
Parav Pandit045508a2012-03-26 14:27:13 +00004525 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004526}
4527
4528static int be_map_pci_bars(struct be_adapter *adapter)
4529{
4530 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004531
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004532 if (BEx_chip(adapter) && be_physfn(adapter)) {
4533 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304534 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004535 return -ENOMEM;
4536 }
4537
Sathya Perlace66f782012-11-06 17:48:58 +00004538 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304539 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004540 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004541 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004542
4543 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004544 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004545
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004546pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304547 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004548 be_unmap_pci_bars(adapter);
4549 return -ENOMEM;
4550}
4551
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004552static void be_ctrl_cleanup(struct be_adapter *adapter)
4553{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004554 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004555
4556 be_unmap_pci_bars(adapter);
4557
4558 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004559 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4560 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004561
Sathya Perla5b8821b2011-08-02 19:57:44 +00004562 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004563 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004564 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4565 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004566}
4567
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004568static int be_ctrl_init(struct be_adapter *adapter)
4569{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004570 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4571 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perla5b8821b2011-08-02 19:57:44 +00004572 struct be_dma_mem *rx_filter = &adapter->rx_filter;
Sathya Perlace66f782012-11-06 17:48:58 +00004573 u32 sli_intf;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004574 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004575
Sathya Perlace66f782012-11-06 17:48:58 +00004576 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4577 adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4578 SLI_INTF_FAMILY_SHIFT;
4579 adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4580
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004581 status = be_map_pci_bars(adapter);
4582 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00004583 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004584
4585 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004586 mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4587 mbox_mem_alloc->size,
4588 &mbox_mem_alloc->dma,
4589 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004590 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004591 status = -ENOMEM;
4592 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004593 }
4594 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4595 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4596 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4597 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00004598
Sathya Perla5b8821b2011-08-02 19:57:44 +00004599 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
Joe Perchesede23fa2013-08-26 22:45:23 -07004600 rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4601 rx_filter->size, &rx_filter->dma,
4602 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304603 if (!rx_filter->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00004604 status = -ENOMEM;
4605 goto free_mbox;
4606 }
Joe Perches1f9061d22013-03-15 07:23:58 +00004607
Ivan Vecera29849612010-12-14 05:43:19 +00004608 mutex_init(&adapter->mbox_lock);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004609 spin_lock_init(&adapter->mcc_lock);
4610 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004611
Suresh Reddy5eeff632014-01-06 13:02:24 +05304612 init_completion(&adapter->et_cmd_compl);
Sathya Perlacf588472010-02-14 21:22:01 +00004613 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004614 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004615
4616free_mbox:
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004617 dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4618 mbox_mem_alloc->va, mbox_mem_alloc->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004619
4620unmap_pci_bars:
4621 be_unmap_pci_bars(adapter);
4622
4623done:
4624 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004625}
4626
4627static void be_stats_cleanup(struct be_adapter *adapter)
4628{
Sathya Perla3abcded2010-10-03 22:12:27 -07004629 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004630
4631 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004632 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4633 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004634}
4635
4636static int be_stats_init(struct be_adapter *adapter)
4637{
Sathya Perla3abcded2010-10-03 22:12:27 -07004638 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004639
Sathya Perlaca34fe32012-11-06 17:48:56 +00004640 if (lancer_chip(adapter))
4641 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4642 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004643 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004644 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004645 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004646 else
4647 /* ALL non-BE ASICs */
4648 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004649
Joe Perchesede23fa2013-08-26 22:45:23 -07004650 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4651 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304652 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304653 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004654 return 0;
4655}
4656
/* PCI remove callback: tears down the adapter in the reverse order of
 * be_probe() — RoCE device first, then the recovery worker and netdev,
 * HW/queue config, firmware session, DMA buffers, and finally the PCI
 * resources and the netdev allocation itself.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* Mask interrupts for all ULPs before teardown */
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Must be last: adapter is embedded in the netdev allocation */
	free_netdev(adapter->netdev);
}
4687
/* Read one-time configuration from the firmware: controller attributes,
 * FW log level (BE2/BE3 chips only) and the default RSS queue count.
 * Returns 0 on success or a negative status from the fw command.
 */
static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		/* Mirror the fw's log verbosity into netif msg_enable */
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}
4708
/* Attempt to recover a Lancer-family adapter after a firmware error:
 * wait for the chip to report ready again, tear down and rebuild the
 * whole NIC configuration, and re-open the interface if it was running.
 * Returns 0 on success or a negative errno on failure.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Forget the previous error state before rebuilding */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	/* -EAGAIN means the fw is still provisioning resources; the
	 * caller will retry, anything else ends recovery attempts.
	 */
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4745
/* Periodic (1s) worker that polls for adapter errors and, on Lancer
 * chips, drives the recovery sequence while the netdev is detached.
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl so the stack stops using the netdev
		 * while recovery rebuilds the HW state.
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4771
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues firmware stats and die-temperature queries,
 * replenishes starved RX queues and updates EQ delay settings.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Fire a fresh stats query only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Only the PF polls temperature, every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4814
Sathya Perla257a3fe2013-06-14 15:54:51 +05304815/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004816static bool be_reset_required(struct be_adapter *adapter)
4817{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304818 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004819}
4820
Sathya Perlad3791422012-09-28 04:39:44 +00004821static char *mc_name(struct be_adapter *adapter)
4822{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304823 char *str = ""; /* default */
4824
4825 switch (adapter->mc_type) {
4826 case UMC:
4827 str = "UMC";
4828 break;
4829 case FLEX10:
4830 str = "FLEX10";
4831 break;
4832 case vNIC1:
4833 str = "vNIC-1";
4834 break;
4835 case nPAR:
4836 str = "nPAR";
4837 break;
4838 case UFP:
4839 str = "UFP";
4840 break;
4841 case vNIC2:
4842 str = "vNIC-2";
4843 break;
4844 default:
4845 str = "";
4846 }
4847
4848 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004849}
4850
/* Printable name of this PCI function's role: physical or virtual. */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4855
/* PCI probe callback: brings up one BE function end-to-end — PCI
 * enable/BAR mapping, DMA mask, control structures, firmware sync and
 * reset, queue/filter setup, netdev registration, RoCE add, and the
 * periodic recovery worker. Errors unwind through the goto chain in
 * reverse order of acquisition.
 * Returns 0 on success or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	/* adapter private data is embedded in the netdev allocation */
	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer a 64-bit DMA mask; fall back to 32-bit */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Default to flow control enabled in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4978
/* Legacy PM suspend callback: arms wake-on-LAN if enabled, quiesces
 * interrupts and the recovery worker, closes the interface, frees the
 * HW configuration, then puts the device into the requested low-power
 * state. Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5003
/* Legacy PM resume callback: re-enables the PCI device, waits for
 * firmware readiness, rebuilds the HW configuration and re-opens the
 * interface if it was running before suspend.
 * Returns 0 on success or a negative errno.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup() status is ignored here, unlike in
	 * be_probe()/be_eeh_resume() — TODO confirm this is intentional.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5045
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop both periodic workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* FLR the function so no further DMA occurs across shutdown */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5066
/* EEH error-detected callback: on the first notification, quiesce and
 * tear down the adapter; then tell the EEH core whether a slot reset
 * should be attempted or the device must be disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if notified repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5105
/* EEH slot-reset callback: re-enable the device after the slot has been
 * reset, restore its PCI state and wait for the firmware to become
 * ready. Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	/* Forget the error state recorded in be_eeh_err_detected() */
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5132
/* EEH resume callback: final stage of EEH recovery — reset the
 * function, re-enable interrupts, rebuild the HW configuration, and
 * restart the recovery worker and the netdev.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5175
/* PCI error-recovery (EEH) callbacks wired into the driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5181
/* PCI driver descriptor tying together the entry points defined above */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5192
5193static int __init be_init_module(void)
5194{
Joe Perches8e95a202009-12-03 07:58:21 +00005195 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5196 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005197 printk(KERN_WARNING DRV_NAME
5198 " : Module param rx_frag_size must be 2048/4096/8192."
5199 " Using 2048\n");
5200 rx_frag_size = 2048;
5201 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005202
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005203 return pci_register_driver(&be_driver);
5204}
5205module_init(be_init_module);
5206
/* Module exit point: unregisters the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);