blob: 41a0a5498da74c7b9b1129b68c9173c1b15470a4 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR: name of the HW block for each bit of the
 * unrecoverable-error status-low register, LSB first.
 * NOTE(review): trailing spaces in some entries appear intentional
 * (they are printed verbatim) -- preserve them.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"ERX2 ",
	"SPARE ",
	"JTAG ",
	"MPU_INTPEND "
};
Kalesh APe2fb1af2014-09-19 15:46:58 +053089
/* UE Status High CSR: name of the HW block for each bit of the
 * unrecoverable-error status-high register, LSB first.  The final
 * "Unknown" entry covers any bit beyond the named ones.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"ECRC",
	"Poison TLP",
	"NETC",
	"PERIPH",
	"LLTXULP",
	"D2P",
	"RCON",
	"LDMA",
	"LLTXP",
	"LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700125
126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000271 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000283 }
284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000290 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000291 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700292
Sathya Perla5a712c12013-07-23 15:24:59 +0530293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
dingtianhong61d23e92013-12-30 15:40:43 +0800296 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 status = -EPERM;
298 goto err;
299 }
300
Somnath Koture3a7ae22011-10-27 07:14:05 +0000301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530302 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 return 0;
304err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306 return status;
307}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
Ajit Khaparde61000862013-10-03 16:16:33 -0500440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530484 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500492}
493
Selvin Xavier005d5692011-05-16 07:36:35 +0000494static void populate_lancer_stats(struct be_adapter *adapter)
495{
Selvin Xavier005d5692011-05-16 07:36:35 +0000496 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530497 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000498
499 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
500 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
501 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
502 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000503 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000504 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000505 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
506 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
507 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
508 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
509 drvs->rx_dropped_tcp_length =
510 pport_stats->rx_dropped_invalid_tcp_length;
511 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
512 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
513 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
514 drvs->rx_dropped_header_too_small =
515 pport_stats->rx_dropped_header_too_small;
516 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000517 drvs->rx_address_filtered =
518 pport_stats->rx_address_filtered +
519 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000520 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000521 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000522 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
523 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000524 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000525 drvs->forwarded_packets = pport_stats->num_forwards_lo;
526 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000527 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000528 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000529}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000530
Sathya Perla09c1c682011-08-22 19:41:53 +0000531static void accumulate_16bit_val(u32 *acc, u16 val)
532{
533#define lo(x) (x & 0xFFFF)
534#define hi(x) (x & 0xFFFF0000)
535 bool wrapped = val < lo(*acc);
536 u32 newacc = hi(*acc) + val;
537
538 if (wrapped)
539 newacc += 65536;
540 ACCESS_ONCE(*acc) = newacc;
541}
542
Jingoo Han4188e7d2013-08-05 18:02:02 +0900543static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530544 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000545{
546 if (!BEx_chip(adapter))
547 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
548 else
549 /* below erx HW counter can actually wrap around after
550 * 65535. Driver accumulates a 32-bit value
551 */
552 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
553 (u16)erx_stat);
554}
555
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000556void be_parse_stats(struct be_adapter *adapter)
557{
Ajit Khaparde61000862013-10-03 16:16:33 -0500558 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000559 struct be_rx_obj *rxo;
560 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000561 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000562
Sathya Perlaca34fe32012-11-06 17:48:56 +0000563 if (lancer_chip(adapter)) {
564 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000565 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000566 if (BE2_chip(adapter))
567 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500568 else if (BE3_chip(adapter))
569 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000570 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500571 else
572 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000573
Ajit Khaparde61000862013-10-03 16:16:33 -0500574 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000575 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000576 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
577 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000578 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000579 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000580}
581
Sathya Perlaab1594e2011-07-25 19:10:15 +0000582static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530583 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700584{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000585 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000586 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700587 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000588 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000589 u64 pkts, bytes;
590 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700591 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592
Sathya Perla3abcded2010-10-03 22:12:27 -0700593 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000594 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530595
Sathya Perlaab1594e2011-07-25 19:10:15 +0000596 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700597 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000598 pkts = rx_stats(rxo)->rx_pkts;
599 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700600 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000601 stats->rx_packets += pkts;
602 stats->rx_bytes += bytes;
603 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
604 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
605 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700606 }
607
Sathya Perla3c8def92011-06-12 20:01:58 +0000608 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000609 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530610
Sathya Perlaab1594e2011-07-25 19:10:15 +0000611 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700612 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000613 pkts = tx_stats(txo)->tx_pkts;
614 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700615 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000616 stats->tx_packets += pkts;
617 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000618 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700619
620 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000621 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000622 drvs->rx_alignment_symbol_errors +
623 drvs->rx_in_range_errors +
624 drvs->rx_out_range_errors +
625 drvs->rx_frame_too_long +
626 drvs->rx_dropped_too_small +
627 drvs->rx_dropped_too_short +
628 drvs->rx_dropped_header_too_small +
629 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000630 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700631
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000633 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000634 drvs->rx_out_range_errors +
635 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000636
Sathya Perlaab1594e2011-07-25 19:10:15 +0000637 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700638
639 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000640 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000641
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700642 /* receiver fifo overrun */
643 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000644 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000645 drvs->rx_input_fifo_overflow_drop +
646 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000647 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700648}
649
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000650void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700651{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652 struct net_device *netdev = adapter->netdev;
653
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000654 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000655 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000656 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700657 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000658
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530659 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000660 netif_carrier_on(netdev);
661 else
662 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700663}
664
Sathya Perla3c8def92011-06-12 20:01:58 +0000665static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530666 u32 wrb_cnt, u32 copied, u32 gso_segs,
667 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700668{
Sathya Perla3c8def92011-06-12 20:01:58 +0000669 struct be_tx_stats *stats = tx_stats(txo);
670
Sathya Perlaab1594e2011-07-25 19:10:15 +0000671 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000672 stats->tx_reqs++;
673 stats->tx_wrbs += wrb_cnt;
674 stats->tx_bytes += copied;
675 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700676 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000677 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000678 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700679}
680
681/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000682static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530683 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700684{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700685 int cnt = (skb->len > skb->data_len);
686
687 cnt += skb_shinfo(skb)->nr_frags;
688
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700689 /* to account for hdr wrb */
690 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000691 if (lancer_chip(adapter) || !(cnt & 1)) {
692 *dummy = false;
693 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700694 /* add a dummy to make it an even num */
695 cnt++;
696 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000697 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700698 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
699 return cnt;
700}
701
702static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
703{
704 wrb->frag_pa_hi = upper_32_bits(addr);
705 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
706 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000707 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700708}
709
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000710static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530711 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000712{
713 u8 vlan_prio;
714 u16 vlan_tag;
715
716 vlan_tag = vlan_tx_tag_get(skb);
717 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 /* If vlan priority provided by OS is NOT in available bmap */
719 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
720 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
721 adapter->recommended_prio;
722
723 return vlan_tag;
724}
725
Sathya Perlac9c47142014-03-27 10:46:19 +0530726/* Used only for IP tunnel packets */
727static u16 skb_inner_ip_proto(struct sk_buff *skb)
728{
729 return (inner_ip_hdr(skb)->version == 4) ?
730 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
731}
732
733static u16 skb_ip_proto(struct sk_buff *skb)
734{
735 return (ip_hdr(skb)->version == 4) ?
736 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
737}
738
Somnath Koturcc4ce022010-10-21 07:11:14 -0700739static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Sathya Perla748b5392014-05-09 13:29:13 +0530740 struct sk_buff *skb, u32 wrb_cnt, u32 len,
741 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700742{
Sathya Perlac9c47142014-03-27 10:46:19 +0530743 u16 vlan_tag, proto;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700744
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700745 memset(hdr, 0, sizeof(*hdr));
746
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530747 SET_TX_WRB_HDR_BITS(crc, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700748
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000749 if (skb_is_gso(skb)) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530750 SET_TX_WRB_HDR_BITS(lso, hdr, 1);
751 SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000752 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530753 SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700754 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530755 if (skb->encapsulation) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530756 SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530757 proto = skb_inner_ip_proto(skb);
758 } else {
759 proto = skb_ip_proto(skb);
760 }
761 if (proto == IPPROTO_TCP)
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530762 SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530763 else if (proto == IPPROTO_UDP)
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530764 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700765 }
766
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700767 if (vlan_tx_tag_present(skb)) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530768 SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000769 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530770 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700771 }
772
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000773 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530774 SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
775 SET_TX_WRB_HDR_BITS(event, hdr, 1);
776 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
777 SET_TX_WRB_HDR_BITS(len, hdr, len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700778}
779
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000780static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530781 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000782{
783 dma_addr_t dma;
784
785 be_dws_le_to_cpu(wrb, sizeof(*wrb));
786
787 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000788 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000789 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000790 dma_unmap_single(dev, dma, wrb->frag_len,
791 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000792 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000793 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000794 }
795}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700796
Sathya Perla3c8def92011-06-12 20:01:58 +0000797static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
Sathya Perla748b5392014-05-09 13:29:13 +0530798 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
799 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700800{
Sathya Perla7101e112010-03-22 20:41:12 +0000801 dma_addr_t busaddr;
802 int i, copied = 0;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000803 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700804 struct sk_buff *first_skb = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700805 struct be_eth_wrb *wrb;
806 struct be_eth_hdr_wrb *hdr;
Sathya Perla7101e112010-03-22 20:41:12 +0000807 bool map_single = false;
808 u16 map_head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700809
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700810 hdr = queue_head_node(txq);
811 queue_head_inc(txq);
Sathya Perla7101e112010-03-22 20:41:12 +0000812 map_head = txq->head;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700813
David S. Millerebc8d2a2009-06-09 01:01:31 -0700814 if (skb->len > skb->data_len) {
Eric Dumazete743d312010-04-14 15:59:40 -0700815 int len = skb_headlen(skb);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530816
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000817 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
818 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000819 goto dma_err;
820 map_single = true;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700821 wrb = queue_head_node(txq);
822 wrb_fill(wrb, busaddr, len);
823 be_dws_cpu_to_le(wrb, sizeof(*wrb));
824 queue_head_inc(txq);
825 copied += len;
826 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700827
David S. Millerebc8d2a2009-06-09 01:01:31 -0700828 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
Sathya Perla748b5392014-05-09 13:29:13 +0530829 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
Kalesh AP03d28ff2014-09-19 15:46:56 +0530830
Ian Campbellb061b392011-08-29 23:18:23 +0000831 busaddr = skb_frag_dma_map(dev, frag, 0,
Eric Dumazet9e903e02011-10-18 21:00:24 +0000832 skb_frag_size(frag), DMA_TO_DEVICE);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000833 if (dma_mapping_error(dev, busaddr))
Sathya Perla7101e112010-03-22 20:41:12 +0000834 goto dma_err;
David S. Millerebc8d2a2009-06-09 01:01:31 -0700835 wrb = queue_head_node(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000836 wrb_fill(wrb, busaddr, skb_frag_size(frag));
David S. Millerebc8d2a2009-06-09 01:01:31 -0700837 be_dws_cpu_to_le(wrb, sizeof(*wrb));
838 queue_head_inc(txq);
Eric Dumazet9e903e02011-10-18 21:00:24 +0000839 copied += skb_frag_size(frag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 }
841
842 if (dummy_wrb) {
843 wrb = queue_head_node(txq);
844 wrb_fill(wrb, 0, 0);
845 be_dws_cpu_to_le(wrb, sizeof(*wrb));
846 queue_head_inc(txq);
847 }
848
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000849 wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700850 be_dws_cpu_to_le(hdr, sizeof(*hdr));
851
852 return copied;
Sathya Perla7101e112010-03-22 20:41:12 +0000853dma_err:
854 txq->head = map_head;
855 while (copied) {
856 wrb = queue_head_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000857 unmap_tx_frag(dev, wrb, map_single);
Sathya Perla7101e112010-03-22 20:41:12 +0000858 map_single = false;
859 copied -= wrb->frag_len;
Vasundhara Volamd3de1542014-09-02 09:56:50 +0530860 adapter->drv_stats.dma_map_errors++;
Sathya Perla7101e112010-03-22 20:41:12 +0000861 queue_head_inc(txq);
862 }
863 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864}
865
Somnath Kotur93040ae2012-06-26 22:32:10 +0000866static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000867 struct sk_buff *skb,
868 bool *skip_hw_vlan)
Somnath Kotur93040ae2012-06-26 22:32:10 +0000869{
870 u16 vlan_tag = 0;
871
872 skb = skb_share_check(skb, GFP_ATOMIC);
873 if (unlikely(!skb))
874 return skb;
875
Sarveshwar Bandiefee8e82013-05-13 20:28:20 +0000876 if (vlan_tx_tag_present(skb))
Somnath Kotur93040ae2012-06-26 22:32:10 +0000877 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sarveshwar Bandi52fe29e2013-07-16 12:44:02 +0530878
879 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
880 if (!vlan_tag)
881 vlan_tag = adapter->pvid;
882 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
883 * skip VLAN insertion
884 */
885 if (skip_hw_vlan)
886 *skip_hw_vlan = true;
887 }
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000888
889 if (vlan_tag) {
Jiri Pirko62749e22014-11-19 14:04:58 +0100890 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
891 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000892 if (unlikely(!skb))
893 return skb;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000894 skb->vlan_tci = 0;
895 }
896
897 /* Insert the outer VLAN, if any */
898 if (adapter->qnq_vid) {
899 vlan_tag = adapter->qnq_vid;
Jiri Pirko62749e22014-11-19 14:04:58 +0100900 skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
901 vlan_tag);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000902 if (unlikely(!skb))
903 return skb;
904 if (skip_hw_vlan)
905 *skip_hw_vlan = true;
906 }
907
Somnath Kotur93040ae2012-06-26 22:32:10 +0000908 return skb;
909}
910
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000911static bool be_ipv6_exthdr_check(struct sk_buff *skb)
912{
913 struct ethhdr *eh = (struct ethhdr *)skb->data;
914 u16 offset = ETH_HLEN;
915
916 if (eh->h_proto == htons(ETH_P_IPV6)) {
917 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
918
919 offset += sizeof(struct ipv6hdr);
920 if (ip6h->nexthdr != NEXTHDR_TCP &&
921 ip6h->nexthdr != NEXTHDR_UDP) {
922 struct ipv6_opt_hdr *ehdr =
Kalesh AP504fbf12014-09-19 15:47:00 +0530923 (struct ipv6_opt_hdr *)(skb->data + offset);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000924
925 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
926 if (ehdr->hdrlen == 0xff)
927 return true;
928 }
929 }
930 return false;
931}
932
933static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
934{
935 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
936}
937
Sathya Perla748b5392014-05-09 13:29:13 +0530938static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000939{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000940 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000941}
942
Vasundhara Volamec495fa2014-03-03 14:25:38 +0530943static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
944 struct sk_buff *skb,
945 bool *skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700946{
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000947 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
Sathya Perlaee9c7992013-05-22 23:04:55 +0000948 unsigned int eth_hdr_len;
949 struct iphdr *ip;
Somnath Kotur93040ae2012-06-26 22:32:10 +0000950
Ajit Khaparde1297f9d2013-04-24 11:52:28 +0000951 /* For padded packets, BE HW modifies tot_len field in IP header
952 * incorrecly when VLAN tag is inserted by HW.
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000953 * For padded packets, Lancer computes incorrect checksum.
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000954 */
Sathya Perlaee9c7992013-05-22 23:04:55 +0000955 eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
956 VLAN_ETH_HLEN : ETH_HLEN;
Somnath Kotur3904dcc2013-05-26 21:09:06 +0000957 if (skb->len <= 60 &&
958 (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000959 is_ipv4_pkt(skb)) {
Somnath Kotur93040ae2012-06-26 22:32:10 +0000960 ip = (struct iphdr *)ip_hdr(skb);
961 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
962 }
963
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000964 /* If vlan tag is already inlined in the packet, skip HW VLAN
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530965 * tagging in pvid-tagging mode
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000966 */
Vasundhara Volamf93f1602014-02-12 16:09:25 +0530967 if (be_pvid_tagging_enabled(adapter) &&
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000968 veh->h_vlan_proto == htons(ETH_P_8021Q))
Sathya Perla748b5392014-05-09 13:29:13 +0530969 *skip_hw_vlan = true;
Ajit Khaparded2cb6ce2013-04-24 11:53:08 +0000970
Somnath Kotur93040ae2012-06-26 22:32:10 +0000971 /* HW has a bug wherein it will calculate CSUM for VLAN
972 * pkts even though it is disabled.
973 * Manually insert VLAN in pkt.
974 */
975 if (skb->ip_summed != CHECKSUM_PARTIAL &&
Sathya Perlaee9c7992013-05-22 23:04:55 +0000976 vlan_tx_tag_present(skb)) {
977 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000978 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +0530979 goto err;
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000980 }
981
982 /* HW may lockup when VLAN HW tagging is requested on
983 * certain ipv6 packets. Drop such pkts if the HW workaround to
984 * skip HW tagging is not enabled by FW.
985 */
986 if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
Kalesh APcd3307aa2014-09-19 15:47:02 +0530987 (adapter->pvid || adapter->qnq_vid) &&
988 !qnq_async_evt_rcvd(adapter)))
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000989 goto tx_drop;
990
991 /* Manual VLAN tag insertion to prevent:
992 * ASIC lockup when the ASIC inserts VLAN tag into
993 * certain ipv6 packets. Insert VLAN tags in driver,
994 * and set event, completion, vlan bits accordingly
995 * in the Tx WRB.
996 */
997 if (be_ipv6_tx_stall_chk(adapter, skb) &&
998 be_vlan_tag_tx_chk(adapter, skb)) {
Sathya Perlaee9c7992013-05-22 23:04:55 +0000999 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001000 if (unlikely(!skb))
Vasundhara Volamc9128952014-03-03 14:25:07 +05301001 goto err;
Ajit Khaparde1ded1322011-12-09 13:53:17 +00001002 }
1003
Sathya Perlaee9c7992013-05-22 23:04:55 +00001004 return skb;
1005tx_drop:
1006 dev_kfree_skb_any(skb);
Vasundhara Volamc9128952014-03-03 14:25:07 +05301007err:
Sathya Perlaee9c7992013-05-22 23:04:55 +00001008 return NULL;
1009}
1010
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301011static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1012 struct sk_buff *skb,
1013 bool *skip_hw_vlan)
1014{
1015 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1016 * less may cause a transmit stall on that port. So the work-around is
1017 * to pad short packets (<= 32 bytes) to a 36-byte length.
1018 */
1019 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
Alexander Duyck74b69392014-12-03 08:17:46 -08001020 if (skb_put_padto(skb, 36))
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301021 return NULL;
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301022 }
1023
1024 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1025 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1026 if (!skb)
1027 return NULL;
1028 }
1029
1030 return skb;
1031}
1032
Sathya Perlaee9c7992013-05-22 23:04:55 +00001033static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1034{
1035 struct be_adapter *adapter = netdev_priv(netdev);
1036 struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1037 struct be_queue_info *txq = &txo->q;
1038 bool dummy_wrb, stopped = false;
1039 u32 wrb_cnt = 0, copied = 0;
1040 bool skip_hw_vlan = false;
1041 u32 start = txq->head;
1042
1043 skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
Sathya Perlabc617522013-10-01 16:00:01 +05301044 if (!skb) {
1045 tx_stats(txo)->tx_drv_drops++;
Sathya Perlaee9c7992013-05-22 23:04:55 +00001046 return NETDEV_TX_OK;
Sathya Perlabc617522013-10-01 16:00:01 +05301047 }
Sathya Perlaee9c7992013-05-22 23:04:55 +00001048
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001049 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001050
Ajit Khapardebc0c3402013-04-24 11:52:50 +00001051 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1052 skip_hw_vlan);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001053 if (copied) {
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001054 int gso_segs = skb_shinfo(skb)->gso_segs;
1055
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001056 /* record the sent skb in the sent_skb table */
Sathya Perla3c8def92011-06-12 20:01:58 +00001057 BUG_ON(txo->sent_skb_list[start]);
1058 txo->sent_skb_list[start] = skb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001059
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001060 /* Ensure txq has space for the next skb; Else stop the queue
1061 * *BEFORE* ringing the tx doorbell, so that we serialze the
1062 * tx compls of the current transmit which'll wake up the queue
1063 */
Sathya Perla7101e112010-03-22 20:41:12 +00001064 atomic_add(wrb_cnt, &txq->used);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001065 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1066 txq->len) {
Sathya Perla3c8def92011-06-12 20:01:58 +00001067 netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001068 stopped = true;
1069 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001070
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00001071 be_txq_notify(adapter, txo, wrb_cnt);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001072
Eric Dumazetcd8f76c2012-06-07 22:59:59 +00001073 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001074 } else {
1075 txq->head = start;
Sathya Perlabc617522013-10-01 16:00:01 +05301076 tx_stats(txo)->tx_drv_drops++;
Ajit Khapardec190e3c2009-09-04 03:12:29 +00001077 dev_kfree_skb_any(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001078 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001079 return NETDEV_TX_OK;
1080}
1081
1082static int be_change_mtu(struct net_device *netdev, int new_mtu)
1083{
1084 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301085 struct device *dev = &adapter->pdev->dev;
1086
1087 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1088 dev_info(dev, "MTU must be between %d and %d bytes\n",
1089 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001090 return -EINVAL;
1091 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301092
1093 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301094 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001095 netdev->mtu = new_mtu;
1096 return 0;
1097}
1098
1099/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001100 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1101 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102 */
Sathya Perla10329df2012-06-05 19:37:18 +00001103static int be_vid_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001104{
Vasundhara Volam50762662014-09-12 17:39:14 +05301105 struct device *dev = &adapter->pdev->dev;
Sathya Perla10329df2012-06-05 19:37:18 +00001106 u16 vids[BE_NUM_VLANS_SUPPORTED];
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301107 u16 num = 0, i = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +00001108 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001109
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001110 /* No need to further configure vids if in promiscuous mode */
1111 if (adapter->promiscuous)
1112 return 0;
1113
Sathya Perla92bf14a2013-08-27 16:57:32 +05301114 if (adapter->vlans_added > be_max_vlans(adapter))
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001115 goto set_vlan_promisc;
1116
1117 /* Construct VLAN Table to give to HW */
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301118 for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1119 vids[num++] = cpu_to_le16(i);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001120
Kalesh AP4d567d92014-05-09 13:29:17 +05301121 status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001122 if (status) {
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001123 /* Set to VLAN promisc mode as setting VLAN filter failed */
Kalesh AP4c600052014-05-30 19:06:26 +05301124 if (addl_status(status) ==
1125 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001126 goto set_vlan_promisc;
Vasundhara Volam50762662014-09-12 17:39:14 +05301127 dev_err(dev, "Setting HW VLAN filtering failed\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001128 } else {
1129 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1130 /* hw VLAN filtering re-enabled. */
1131 status = be_cmd_rx_filter(adapter,
1132 BE_FLAGS_VLAN_PROMISC, OFF);
1133 if (!status) {
Vasundhara Volam50762662014-09-12 17:39:14 +05301134 dev_info(dev,
1135 "Disabling VLAN Promiscuous mode\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001136 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001137 }
1138 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001139 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +00001140
Sathya Perlab31c50a2009-09-17 10:30:13 -07001141 return status;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001142
1143set_vlan_promisc:
Somnath Kotura6b74e02014-01-21 15:50:55 +05301144 if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1145 return 0;
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001146
1147 status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1148 if (!status) {
Vasundhara Volam50762662014-09-12 17:39:14 +05301149 dev_info(dev, "Enable VLAN Promiscuous mode\n");
Ajit Khaparded9d604f2013-09-27 15:17:58 -05001150 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1151 } else
Vasundhara Volam50762662014-09-12 17:39:14 +05301152 dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001153 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001154}
1155
Patrick McHardy80d5c362013-04-19 02:04:28 +00001156static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001157{
1158 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001159 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001160
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001161 /* Packets with VID 0 are always received by Lancer by default */
1162 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301163 return status;
1164
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301165 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301166 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001167
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301168 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301169 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001170
Somnath Kotura6b74e02014-01-21 15:50:55 +05301171 status = be_vid_config(adapter);
1172 if (status) {
1173 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301174 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301175 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301176
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001177 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178}
1179
Patrick McHardy80d5c362013-04-19 02:04:28 +00001180static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001181{
1182 struct be_adapter *adapter = netdev_priv(netdev);
1183
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001184 /* Packets with VID 0 are always received by Lancer by default */
1185 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301186 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001187
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301188 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301189 adapter->vlans_added--;
1190
1191 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001192}
1193
Somnath kotur7ad09452014-03-03 14:24:43 +05301194static void be_clear_promisc(struct be_adapter *adapter)
1195{
1196 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301197 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301198
1199 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1200}
1201
Sathya Perlaa54769f2011-10-24 02:45:00 +00001202static void be_set_rx_mode(struct net_device *netdev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001203{
1204 struct be_adapter *adapter = netdev_priv(netdev);
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001205 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001206
1207 if (netdev->flags & IFF_PROMISC) {
Sathya Perla5b8821b2011-08-02 19:57:44 +00001208 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
Sathya Perla24307ee2009-06-18 00:09:25 +00001209 adapter->promiscuous = true;
1210 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001211 }
Sathya Perla24307ee2009-06-18 00:09:25 +00001212
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001213 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +00001214 if (adapter->promiscuous) {
Somnath kotur7ad09452014-03-03 14:24:43 +05301215 be_clear_promisc(adapter);
Sathya Perlac0e64ef2011-08-02 19:57:43 +00001216 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00001217 be_vid_config(adapter);
Sathya Perla24307ee2009-06-18 00:09:25 +00001218 }
1219
Sathya Perlae7b909a2009-11-22 22:01:10 +00001220 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00001221 if (netdev->flags & IFF_ALLMULTI ||
Kalesh APa0794882014-05-30 19:06:23 +05301222 netdev_mc_count(netdev) > be_max_mc(adapter))
1223 goto set_mcast_promisc;
Sathya Perla24307ee2009-06-18 00:09:25 +00001224
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001225 if (netdev_uc_count(netdev) != adapter->uc_macs) {
1226 struct netdev_hw_addr *ha;
1227 int i = 1; /* First slot is claimed by the Primary MAC */
1228
1229 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1230 be_cmd_pmac_del(adapter, adapter->if_handle,
1231 adapter->pmac_id[i], 0);
1232 }
1233
Sathya Perla92bf14a2013-08-27 16:57:32 +05301234 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
Ajit Khapardefbc13f02012-03-18 06:23:21 +00001235 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1236 adapter->promiscuous = true;
1237 goto done;
1238 }
1239
1240 netdev_for_each_uc_addr(ha, adapter->netdev) {
1241 adapter->uc_macs++; /* First slot is for Primary MAC */
1242 be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1243 adapter->if_handle,
1244 &adapter->pmac_id[adapter->uc_macs], 0);
1245 }
1246 }
1247
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001248 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
Kalesh APa0794882014-05-30 19:06:23 +05301249 if (!status) {
1250 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1251 adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
1252 goto done;
Padmanabh Ratnakar0fc16eb2012-04-25 01:46:06 +00001253 }
Kalesh APa0794882014-05-30 19:06:23 +05301254
1255set_mcast_promisc:
1256 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1257 return;
1258
1259 /* Set to MCAST promisc mode if setting MULTICAST address fails
1260 * or if num configured exceeds what we support
1261 */
1262 status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1263 if (!status)
1264 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
Sathya Perla24307ee2009-06-18 00:09:25 +00001265done:
1266 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001267}
1268
/* ndo_set_vf_mac handler: programs @mac as the MAC address of VF @vf.
 * Returns 0 on success (or when @mac already matches the active MAC),
 * -EPERM when SR-IOV is disabled, -EINVAL for a bad MAC or VF index,
 * or a translated FW error code.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	/* BEx chips change the MAC via delete + re-add of a pmac entry;
	 * later chips have a dedicated set-mac FW command
	 */
	if (BEx_chip(adapter)) {
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC so be_get_vf_config() reports it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1308
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001309static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301310 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001311{
1312 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001313 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001314
Sathya Perla11ac75e2011-12-13 00:58:50 +00001315 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001316 return -EPERM;
1317
Sathya Perla11ac75e2011-12-13 00:58:50 +00001318 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319 return -EINVAL;
1320
1321 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001322 vi->max_tx_rate = vf_cfg->tx_rate;
1323 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001324 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1325 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001326 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301327 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001328
1329 return 0;
1330}
1331
/* ndo_set_vf_vlan handler: configures transparent (HW-inserted) VLAN
 * tagging for VF @vf. Non-zero @vlan/@qos enables tagging with that
 * tag; vlan == 0 and qos == 0 disables it.
 * Returns 0, -EPERM, -EINVAL, or a translated FW error code.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		/* Fold priority and VID into a single 802.1Q tag value */
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* Skip the FW call if the tag is already programmed */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* Cache the tag so be_get_vf_config() reports it */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1366
/* ndo_set_vf_rate handler: sets the maximum TX rate (in Mbps) for
 * VF @vf. A minimum rate is not supported; max_tx_rate == 0 removes
 * the rate limit. The requested rate is validated against the current
 * link speed before being programmed via the FW QoS command.
 * Returns 0, -EPERM/-EINVAL/-ENETDOWN, or a translated FW error code.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	/* Minimum rate guarantees are not supported by this HW */
	if (min_tx_rate)
		return -EINVAL;

	/* Rate 0 disables the limit; no link-speed validation needed */
	if (!max_tx_rate)
		goto config_qos;

	/* The current link speed bounds the allowed rate */
	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* Cache the rate so be_get_vf_config() reports it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	/* NOTE(review): paths that logged a specific message above also
	 * hit this generic message, producing two log lines per failure
	 */
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Kalesh APe2fb1af2014-09-19 15:46:58 +05301428
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301429static int be_set_vf_link_state(struct net_device *netdev, int vf,
1430 int link_state)
1431{
1432 struct be_adapter *adapter = netdev_priv(netdev);
1433 int status;
1434
1435 if (!sriov_enabled(adapter))
1436 return -EPERM;
1437
1438 if (vf >= adapter->num_vfs)
1439 return -EINVAL;
1440
1441 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301442 if (status) {
1443 dev_err(&adapter->pdev->dev,
1444 "Link state change on VF %d failed: %#x\n", vf, status);
1445 return be_cmd_status(status);
1446 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301447
Kalesh APabccf232014-07-17 16:20:24 +05301448 adapter->vf_cfg[vf].plink_tracking = link_state;
1449
1450 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301451}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001452
Sathya Perla2632baf2013-10-01 16:00:00 +05301453static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1454 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001455{
Sathya Perla2632baf2013-10-01 16:00:00 +05301456 aic->rx_pkts_prev = rx_pkts;
1457 aic->tx_reqs_prev = tx_pkts;
1458 aic->jiffies = now;
1459}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001460
/* Adaptive interrupt coalescing: for each event queue, derive a new EQ
 * delay from the RX+TX packet rate observed since the last run, clamp
 * it to the per-EQ min/max, and push all changed delays to the FW in a
 * single command.
 */
static void be_eqd_update(struct be_adapter *adapter)
{
	struct be_set_eqd set_eqd[MAX_EVT_QS];
	int eqd, i, num = 0, start;
	struct be_aic_obj *aic;
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u64 rx_pkts, tx_pkts;
	ulong now;
	u32 pps, delta;

	for_all_evt_queues(adapter, eqo, i) {
		aic = &adapter->aic_obj[eqo->idx];
		if (!aic->enable) {
			/* AIC disabled: use the user/ethtool-set delay */
			if (aic->jiffies)
				aic->jiffies = 0;
			eqd = aic->et_eqd;
			goto modify_eqd;
		}

		/* Fetch a consistent RX pkt count (u64_stats retry loop) */
		rxo = &adapter->rx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
			rx_pkts = rxo->stats.rx_pkts;
		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));

		/* Same for the TX request count */
		txo = &adapter->tx_obj[eqo->idx];
		do {
			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
			tx_pkts = txo->stats.tx_reqs;
		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));

		/* Skip, if wrapped around or first calculation */
		now = jiffies;
		if (!aic->jiffies || time_before(now, aic->jiffies) ||
		    rx_pkts < aic->rx_pkts_prev ||
		    tx_pkts < aic->tx_reqs_prev) {
			be_aic_update(aic, rx_pkts, tx_pkts, now);
			continue;
		}

		/* Combined pkts-per-second since the previous snapshot */
		delta = jiffies_to_msecs(now - aic->jiffies);
		pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
			(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
		eqd = (pps / 15000) << 2;

		/* Very low rates get no delay; otherwise clamp to bounds */
		if (eqd < 8)
			eqd = 0;
		eqd = min_t(u32, eqd, aic->max_eqd);
		eqd = max_t(u32, eqd, aic->min_eqd);

		be_aic_update(aic, rx_pkts, tx_pkts, now);
modify_eqd:
		/* Queue a FW update only if the delay actually changed */
		if (eqd != aic->prev_eqd) {
			set_eqd[num].delay_multiplier = (eqd * 65)/100;
			set_eqd[num].eq_id = eqo->q.id;
			aic->prev_eqd = eqd;
			num++;
		}
	}

	if (num)
		be_cmd_modify_eqd(adapter, set_eqd, num);
}
1526
/* Account one RX completion into the per-RX-queue SW statistics.
 * The u64_stats sync section lets readers fetch consistent 64-bit
 * counters (see the fetch/retry loops in be_eqd_update()).
 */
static void be_rx_stats_update(struct be_rx_obj *rxo,
			       struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = rx_stats(rxo);

	u64_stats_update_begin(&stats->sync);
	stats->rx_compl++;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rx_compl_err++;
	u64_stats_update_end(&stats->sync);
}
1542
Sathya Perla2e588f82011-03-11 02:49:26 +00001543static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001544{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001545 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301546 * Also ignore ipcksm for ipv6 pkts
1547 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001548 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301549 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001550}
1551
/* Consume one RX frag descriptor from the tail of the RX queue and
 * return its page_info. When this frag is the last one carved out of
 * its page, the whole page is DMA-unmapped; otherwise only this frag
 * is synced for CPU access (the page stays mapped for its other frags).
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* Advance the SW consumer index */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1577
1578/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001579static void be_rx_compl_discard(struct be_rx_obj *rxo,
1580 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001583 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001585 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301586 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001587 put_page(page_info->page);
1588 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589 }
1590}
1591
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 * Tiny frames (<= BE_HDR_LEN) are copied entirely into the skb head;
 * larger frames get only the Ethernet header copied and the payload
 * attached as page frags, with frags from the same physical page
 * coalesced into a single frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		/* Payload after the header stays in the page as frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag: drop the extra
			 * page reference taken when the frag was posted
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1666
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * build an skb from the RX frags, fill in checksum/hash/VLAN metadata,
 * and hand it to the stack via netif_receive_skb().
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* No skb: count the drop and recycle the RX frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	/* Trust the HW checksum only when RXCSUM is on and the compl
	 * says the checksum verified cleanly
	 */
	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1702
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the RX page frags directly to a napi skb (no header copy),
 * fill in hash/VLAN metadata, and pass it to GRO via napi_gro_frags().
 */
static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
				    struct napi_struct *napi,
				    struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(napi);
	if (!skb) {
		/* No skb available: drop the frame, recycle its frags */
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
		} else {
			/* Same page: drop the extra reference taken when
			 * the frag was posted
			 */
			put_page(page_info->page);
		}
		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	napi_gro_frags(napi);
}
1760
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001761static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1762 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001763{
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301764 rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
1765 rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
1766 rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
1767 rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
1768 rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
1769 rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
1770 rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
1771 rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
1772 rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
1773 rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
1774 rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001775 if (rxcp->vlanf) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301776 rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
1777 rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001778 }
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301779 rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
Sathya Perlac9c47142014-03-27 10:46:19 +05301780 rxcp->tunneled =
Sathya Perlac3c18bc2014-09-02 09:56:47 +05301781 GET_RX_COMPL_V1_BITS(tunneled, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001782}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783
/* Extract the fields of a v0 (legacy) RX completion entry into the
 * chip-independent be_rx_compl_info representation. Unlike v1, this
 * format carries an ip_frag bit and no 'tunneled' bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	/* VLAN fields are meaningful only when the vtp bit is set */
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1805
/* Return the next valid RX completion from the RX CQ, parsed into
 * rxo->rxcp, or NULL if no completion is pending. The consumed entry
 * is invalidated in place so it is not seen again after ring wrap.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the entry only after the valid bit is seen */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* L4 checksum of an IP fragment cannot be trusted */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* Hide the vlan if the tag is the port's pvid and that
		 * vid is not configured on the host
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1850
Eric Dumazet1829b082011-03-01 05:48:12 +00001851static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001854
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001856 gfp |= __GFP_COMP;
1857 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858}
1859
1860/*
1861 * Allocate a page, split it to fragments of size rx_frag_size and post as
1862 * receive buffers to BE
1863 */
/* Post up to @frags_needed receive-buffer fragments to rxo's RXQ.
 * Big pages are carved into rx_frag_size slices; the DMA unmap address
 * stored per frag is the full-page address only on the page's last frag
 * (that frag's unmap releases the whole mapping).
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	/* Stop early when we wrap onto a slot whose page is still owned by HW */
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new big page and DMA-map it once */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next frag reuses the current page; bump refcount */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the frag's bus address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* HW doorbell can take at most 256 frags per ring; batch */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1942
/* Pop the next valid TX completion from @tx_cq (converted to CPU
 * byte-order, valid bit cleared for reuse); NULL when none pending.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read barrier: see the valid bit before the DMA'ed entry contents */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	/* Reset the valid bit so the slot reads as empty next time around */
	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1958
/* Unmap and free one completed TX skb whose wrbs occupy the TXQ from the
 * current tail through @last_index (inclusive). Returns the number of wrbs
 * consumed (including the header wrb) so the caller can credit the queue.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	/* The skb was parked at the tail slot when it was transmitted */
	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* Only the first data wrb may also cover the linear header */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* Normal completion path: consume (not drop) for tracing purposes */
	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1990
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001991/* Return the number of events in the event queue */
1992static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001993{
1994 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001995 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001996
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001997 do {
1998 eqe = queue_tail_node(&eqo->q);
1999 if (eqe->evt == 0)
2000 break;
2001
2002 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002003 eqe->evt = 0;
2004 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002005 queue_tail_inc(&eqo->q);
2006 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002007
2008 return num;
2009}
2010
/* Leaves the EQ in disarmed state */
2012static void be_eq_clean(struct be_eq_obj *eqo)
2013{
2014 int num = events_get(eqo);
2015
2016 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2017}
2018
/* Drain rxo's completion queue and free all posted-but-unused RX buffers,
 * leaving both the CQ (unarmed) and RXQ in a pristine, empty state.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or when the HW is known bad */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* Re-arm the CQ to coax out partially coalesced
			 * entries, then poll again after 1ms */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* num_rcvd == 0 marks the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = 0;
	rxq->head = 0;
}
2068
/* Reap all outstanding TX completions across every TX queue, then forcibly
 * free any posted skbs whose completions will never arrive. Intended for
 * the shutdown/reset path; polls until HW has been silent for ~10ms.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* Progress was made; restart silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* Recompute the wrb span of the skb at the tail so it
			 * can be reclaimed as though a compl had arrived */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2126
/* Tear down all event queues: drain each created EQ, destroy it in FW,
 * unregister its NAPI context, and free its host memory.
 */
static void be_evt_queues_destroy(struct be_adapter *adapter)
{
	struct be_eq_obj *eqo;
	int i;

	for_all_evt_queues(adapter, eqo, i) {
		if (eqo->q.created) {
			/* Drain before destroying so no events are orphaned */
			be_eq_clean(eqo);
			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
			napi_hash_del(&eqo->napi);
			netif_napi_del(&eqo->napi);
		}
		/* Queue memory is freed even if FW creation never happened */
		be_queue_free(adapter, &eqo->q);
	}
}
2142
/* Allocate and create the adapter's event queues (one per vector, capped
 * by cfg_num_qs), registering a NAPI context and default adaptive-interrupt
 * settings for each. Returns 0 or a negative error.
 * NOTE(review): on failure, partially-created EQs are left for the caller
 * to unwind — presumably via be_evt_queues_destroy(); verify at call sites.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* Adaptive interrupt coalescing on by default */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2175
/* Destroy the MCC queue and its completion queue.
 * The MCCQ must be destroyed before its CQ.
 */
static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}
2190
2191/* Must be called only after TX qs are created as MCC shares TX EQ */
/* Create the MCC queue and its completion queue, unwinding partial state
 * via the goto chain on any failure.
 * NOTE(review): returns -1 rather than a -errno code on failure; callers
 * presumably only test for non-zero — verify before changing.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			   sizeof(struct be_mcc_compl)))
		goto err;

	/* Use the default EQ for MCC completions */
	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
		goto mcc_cq_free;

	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
2223
/* Destroy every TX queue and its companion completion queue.
 * Per queue: the TXQ is destroyed before its CQ.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_tx_obj *txo;
	u8 i;

	for_all_tx_queues(adapter, txo, i) {
		q = &txo->q;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
		be_queue_free(adapter, q);

		q = &txo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);
	}
}
2242
/* Allocate and create all TX queues and their completion queues.
 * num_tx_qs is capped by the number of EQs and the HW TXQ limit.
 * Returns 0 or a negative error; partial state is left for the caller
 * to unwind (presumably via be_tx_queues_destroy()).
 */
static int be_tx_qs_create(struct be_adapter *adapter)
{
	struct be_queue_info *cq, *eq;
	struct be_tx_obj *txo;
	int status, i;

	adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));

	for_all_tx_queues(adapter, txo, i) {
		cq = &txo->cq;
		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
					sizeof(struct be_eth_tx_compl));
		if (status)
			return status;

		u64_stats_init(&txo->stats.sync);
		u64_stats_init(&txo->stats.sync_compl);

		/* If num_evt_qs is less than num_tx_qs, then more than
		 * one txq share an eq
		 */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		status = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (status)
			return status;

		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
					sizeof(struct be_eth_wrb));
		if (status)
			return status;

		status = be_cmd_txq_create(adapter, txo);
		if (status)
			return status;
	}

	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
		 adapter->num_tx_qs);
	return 0;
}
2283
2284static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002285{
2286 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002287 struct be_rx_obj *rxo;
2288 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002289
Sathya Perla3abcded2010-10-03 22:12:27 -07002290 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002291 q = &rxo->cq;
2292 if (q->created)
2293 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2294 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002296}
2297
/* Allocate and create an RX completion queue per RX ring. One RSS ring is
 * created per EQ; when RSS is in use an extra default (non-RSS) ring is
 * added for non-IP traffic. Returns 0 or a negative error.
 */
static int be_rx_cqs_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	/* We can create as many RSS rings as there are EQs. */
	adapter->num_rx_qs = adapter->num_evt_qs;

	/* We'll use RSS only if atleast 2 RSS rings are supported.
	 * When RSS is used, we'll need a default RXQ for non-IP traffic.
	 */
	if (adapter->num_rx_qs > 1)
		adapter->num_rx_qs++;

	/* big_page_size is a whole number of pages covering rx_frag_size */
	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				    sizeof(struct be_eth_rx_compl));
		if (rc)
			return rc;

		u64_stats_init(&rxo->stats.sync);
		/* RX CQs may share EQs round-robin when EQs are fewer */
		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
		if (rc)
			return rc;
	}

	dev_info(&adapter->pdev->dev,
		 "created %d RSS queue(s) and 1 default RX queue\n",
		 adapter->num_rx_qs - 1);
	return 0;
}
2334
/* Legacy INTx interrupt handler: counts pending events, kicks NAPI, and
 * tracks spurious interrupts so the kernel does not disable the line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionaly
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* Ack the consumed events but leave the EQ unarmed for NAPI */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2366
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002367static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002368{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002369 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002370
Sathya Perla0b545a62012-11-23 00:27:18 +00002371 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2372 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002373 return IRQ_HANDLED;
2374}
2375
Sathya Perla2e588f82011-03-11 02:49:26 +00002376static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002377{
Somnath Koture38b1702013-05-29 22:55:56 +00002378 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002379}
2380
/* NAPI RX poll worker: process up to @budget completions from rxo's CQ,
 * delivering frames via GRO or the regular path, and replenish the RXQ
 * when it runs low. Returns the number of completions consumed.
 * @polling distinguishes busy-poll from regular NAPI context.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		/* Ack processed entries and re-arm the CQ */
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2440
Kalesh AP512bb8a2014-09-02 09:56:49 +05302441static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2442{
2443 switch (status) {
2444 case BE_TX_COMP_HDR_PARSE_ERR:
2445 tx_stats(txo)->tx_hdr_parse_err++;
2446 break;
2447 case BE_TX_COMP_NDMA_ERR:
2448 tx_stats(txo)->tx_dma_err++;
2449 break;
2450 case BE_TX_COMP_ACL_ERR:
2451 tx_stats(txo)->tx_spoof_check_err++;
2452 break;
2453 }
2454}
2455
2456static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2457{
2458 switch (status) {
2459 case LANCER_TX_COMP_LSO_ERR:
2460 tx_stats(txo)->tx_tso_err++;
2461 break;
2462 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2463 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2464 tx_stats(txo)->tx_spoof_check_err++;
2465 break;
2466 case LANCER_TX_COMP_QINQ_ERR:
2467 tx_stats(txo)->tx_qinq_err++;
2468 break;
2469 case LANCER_TX_COMP_PARITY_ERR:
2470 tx_stats(txo)->tx_internal_parity_err++;
2471 break;
2472 case LANCER_TX_COMP_DMA_ERR:
2473 tx_stats(txo)->tx_dma_err++;
2474 break;
2475 }
2476}
2477
/* Reap all pending TX completions for @txo (netdev tx-queue index @idx),
 * recording per-status error counters, crediting freed wrbs back to the
 * queue, and waking the subqueue if it had stalled for lack of wrbs.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* Non-zero status indicates a TX error; account it per chip */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002516
/* NAPI poll handler shared by all event queues. Processes Tx completions
 * (not bounded by @budget), then Rx completions up to @budget, then MCC
 * completions for the EQ that owns the MCC queue. Returns the amount of
 * Rx work done; when below budget, NAPI is completed and the EQ re-armed.
 */
int be_poll(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	int max_work = 0, work, i, num_evts;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;

	/* Count events queued on this EQ so they can be acked below */
	num_evts = events_get(eqo);

	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
		be_process_tx(adapter, txo, i);

	if (be_lock_napi(eqo)) {
		/* This loop will iterate twice for EQ0 in which
		 * completions of the last RXQ (default one) are also processed
		 * For other EQs the loop iterates only once
		 */
		for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
			work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
			max_work = max(work, max_work);
		}
		be_unlock_napi(eqo);
	} else {
		/* busy-poll currently owns the Rx queues; report the full
		 * budget as consumed so NAPI polls again later
		 */
		max_work = budget;
	}

	if (is_mcc_eqo(eqo))
		be_process_mcc(adapter);

	if (max_work < budget) {
		napi_complete(napi);
		/* re-arm the EQ (arm = true) and ack the consumed events */
		be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
	} else {
		/* As we'll continue in polling mode, count and clear events */
		be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
	}
	return max_work;
}
2556
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency socket busy-poll handler: tries to grab this EQ's Rx queues
 * from NAPI, reaps a small batch (at most 4 frames) from the first queue
 * with work, and returns the number of frames processed.
 * Returns LL_FLUSH_BUSY when NAPI currently owns the queues.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int i, work = 0;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (work)
			break;
	}

	be_unlock_busy_poll(eqo);
	return work;
}
#endif
2578
/* Check the adapter for fatal hardware/firmware errors.
 * Lancer chips report errors through the SLIPORT status registers;
 * other chips report Unrecoverable Errors (UE) via PCI config space.
 * Sets adapter->hw_error and takes the carrier down when a real error
 * (i.e. not an in-progress FW reset) is found. No-op if an error was
 * already latched.
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		/* masked-off bits are not real errors */
		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* log a description for every UE bit that is set */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2654
Sathya Perla8d56ff12009-11-22 22:02:26 +00002655static void be_msix_disable(struct be_adapter *adapter)
2656{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002657 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002658 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002659 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302660 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002661 }
2662}
2663
/* Enable MSI-X for the adapter. The granted vector count may be anywhere
 * between MIN_MSIX_VECTORS and the requested amount; when RoCE is
 * supported, half of the granted vectors are reserved for RoCE.
 * Returns 0 on success; on failure, returns 0 for a PF (which can fall
 * back to INTx) and the error for a VF (which cannot).
 */
static int be_msix_enable(struct be_adapter *adapter)
{
	int i, num_vec;
	struct device *dev = &adapter->pdev->dev;

	/* If RoCE is supported, program the max number of NIC vectors that
	 * may be configured via set-channels, along with vectors needed for
	 * RoCe. Else, just program the number we'll use initially.
	 */
	if (be_roce_supported(adapter))
		num_vec = min_t(int, 2 * be_max_eqs(adapter),
				2 * num_online_cpus());
	else
		num_vec = adapter->cfg_num_qs;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	/* may grant fewer vectors than requested, down to MIN_MSIX_VECTORS */
	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					MIN_MSIX_VECTORS, num_vec);
	if (num_vec < 0)
		goto fail;

	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
		adapter->num_msix_roce_vec = num_vec / 2;
		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
			 adapter->num_msix_roce_vec);
	}

	/* NIC keeps whatever RoCE did not take */
	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;

	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
		 adapter->num_msix_vec);
	return 0;

fail:
	dev_warn(dev, "MSIx enable failed\n");

	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
	if (!be_physfn(adapter))
		return num_vec;
	return 0;
}
2707
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002708static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302709 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002710{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302711 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002712}
2713
/* Request an MSI-X interrupt for every event queue, naming each IRQ
 * "<netdev>-q<n>". On failure, frees the IRQs already requested (in
 * reverse order) and disables MSI-X before returning the error.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_eq_obj *eqo;
	int status, i, vec;

	for_all_evt_queues(adapter, eqo, i) {
		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
		vec = be_msix_vec_get(adapter, eqo);
		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
		if (status)
			goto err_msix;
	}

	return 0;
err_msix:
	/* unwind: free only the IRQs successfully requested so far */
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
	return status;
}
2737
2738static int be_irq_register(struct be_adapter *adapter)
2739{
2740 struct net_device *netdev = adapter->netdev;
2741 int status;
2742
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002743 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002744 status = be_msix_register(adapter);
2745 if (status == 0)
2746 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002747 /* INTx is not supported for VF */
2748 if (!be_physfn(adapter))
2749 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002750 }
2751
Sathya Perlae49cc342012-11-27 19:50:02 +00002752 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002753 netdev->irq = adapter->pdev->irq;
2754 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002755 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002756 if (status) {
2757 dev_err(&adapter->pdev->dev,
2758 "INTx request IRQ failed - err %d\n", status);
2759 return status;
2760 }
2761done:
2762 adapter->isr_registered = true;
2763 return 0;
2764}
2765
2766static void be_irq_unregister(struct be_adapter *adapter)
2767{
2768 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002769 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002770 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002771
2772 if (!adapter->isr_registered)
2773 return;
2774
2775 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002776 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002777 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002778 goto done;
2779 }
2780
2781 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002782 for_all_evt_queues(adapter, eqo, i)
2783 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002784
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002785done:
2786 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002787}
2788
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002789static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002790{
2791 struct be_queue_info *q;
2792 struct be_rx_obj *rxo;
2793 int i;
2794
2795 for_all_rx_queues(adapter, rxo, i) {
2796 q = &rxo->q;
2797 if (q->created) {
2798 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002799 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002800 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002801 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002802 }
2803}
2804
/* ndo_stop handler: quiesce RoCE and NAPI, drain Tx, destroy Rx queues,
 * remove programmed unicast MACs, sync and clean the EQs, and finally
 * unregister the IRQs. Teardown order mirrors be_open() in reverse.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* delete the additional uc-macs; slot 0 holds the primary MAC */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* wait out any in-flight interrupt handlers, then drain each EQ */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2854
/* Allocate and create all Rx queues in FW (default queue first), build
 * and program the RSS indirection table and hash key when multiple Rx
 * queues exist, and post the initial receive buffers.
 * Returns 0 on success or a FW-command error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct rss_info *rss = &adapter->rss_info;
	u8 rss_key[RSS_HASH_KEY_LEN];
	struct be_rx_obj *rxo;
	int rc, i, j;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* fill the indirection table by cycling through the RSS
		 * queues until all RSS_INDIR_TABLE_LEN slots are assigned
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_key);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* remember the key actually programmed, for ethtool reporting */
	memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
2920
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002921static int be_open(struct net_device *netdev)
2922{
2923 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002924 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002925 struct be_rx_obj *rxo;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002926 struct be_tx_obj *txo;
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002927 u8 link_status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002928 int status, i;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002929
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002930 status = be_rx_qs_create(adapter);
Sathya Perla482c9e72011-06-29 23:33:17 +00002931 if (status)
2932 goto err;
2933
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002934 status = be_irq_register(adapter);
2935 if (status)
2936 goto err;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002937
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002938 for_all_rx_queues(adapter, rxo, i)
Sathya Perla3abcded2010-10-03 22:12:27 -07002939 be_cq_notify(adapter, rxo->cq.id, true, 0);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002940
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002941 for_all_tx_queues(adapter, txo, i)
2942 be_cq_notify(adapter, txo->cq.id, true, 0);
2943
Sathya Perla7a1e9b22010-02-17 01:35:11 +00002944 be_async_mcc_enable(adapter);
2945
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002946 for_all_evt_queues(adapter, eqo, i) {
2947 napi_enable(&eqo->napi);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302948 be_enable_busy_poll(eqo);
Suresh Reddy4cad9f32014-07-11 14:03:01 +05302949 be_eq_notify(adapter, eqo->q.id, true, true, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002950 }
Somnath Kotur04d3d622013-05-02 03:36:55 +00002951 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002952
Sathya Perla323ff712012-09-28 04:39:43 +00002953 status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
Ajit Khapardeb236916a2011-12-30 12:15:40 +00002954 if (!status)
2955 be_link_status_update(adapter, link_status);
2956
Sathya Perlafba87552013-05-08 02:05:50 +00002957 netif_tx_start_all_queues(netdev);
Parav Pandit045508a2012-03-26 14:27:13 +00002958 be_roce_dev_open(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05302959
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302960#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05302961 if (skyhawk_chip(adapter))
2962 vxlan_get_rx_port(netdev);
Sathya Perlac5abe7c2014-04-01 12:33:59 +05302963#endif
2964
Sathya Perla889cd4b2010-05-30 23:33:45 +00002965 return 0;
2966err:
2967 be_close(adapter->netdev);
2968 return -EIO;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002969}
2970
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002971static int be_setup_wol(struct be_adapter *adapter, bool enable)
2972{
2973 struct be_dma_mem cmd;
2974 int status = 0;
2975 u8 mac[ETH_ALEN];
2976
2977 memset(mac, 0, ETH_ALEN);
2978
2979 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002980 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2981 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302982 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302983 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002984
2985 if (enable) {
2986 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302987 PCICFG_PM_CONTROL_OFFSET,
2988 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002989 if (status) {
2990 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002991 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002992 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2993 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002994 return status;
2995 }
2996 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302997 adapter->netdev->dev_addr,
2998 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002999 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
3000 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3001 } else {
3002 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3003 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3004 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3005 }
3006
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003007 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003008 return status;
3009}
3010
/*
 * Generate a seed MAC address from the PF MAC Address using jhash.
 * MAC Address for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf;
	int status = 0;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	be_vf_eth_addr_generate(adapter, mac);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx uses a pmac-add command; newer chips set the MAC
		 * directly on the VF's interface
		 */
		if (BEx_chip(adapter))
			status = be_cmd_pmac_add(adapter, mac,
						 vf_cfg->if_handle,
						 &vf_cfg->pmac_id, vf + 1);
		else
			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
						vf + 1);

		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address assignment failed for VF %d\n",
				vf);
		else
			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

		/* next VF gets the next consecutive address */
		mac[5] += 1;
	}
	/* NOTE(review): returns only the status of the last VF; earlier
	 * per-VF failures are logged but not propagated
	 */
	return status;
}
3046
/* Query the FW for each VF's currently-active MAC address and cache it
 * in the VF's config. Stops and returns the error on the first failure.
 */
static int be_vfs_mac_query(struct be_adapter *adapter)
{
	int status, vf;
	u8 mac[ETH_ALEN];
	struct be_vf_cfg *vf_cfg;

	for_all_vfs(adapter, vf_cfg, vf) {
		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
					       mac, vf_cfg->if_handle,
					       false, vf+1);
		if (status)
			return status;
		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
	}
	return 0;
}
3063
/* Tear down SR-IOV: disable SR-IOV on the PCI device, remove each VF's
 * MAC programming and interface, and free the VF config array. If VFs
 * are still assigned to VMs, only the bookkeeping is released and the
 * VFs are left enabled.
 */
static void be_vf_clear(struct be_adapter *adapter)
{
	struct be_vf_cfg *vf_cfg;
	u32 vf;

	if (pci_vfs_assigned(adapter->pdev)) {
		dev_warn(&adapter->pdev->dev,
			 "VFs are assigned to VMs: not disabling VFs\n");
		goto done;
	}

	pci_disable_sriov(adapter->pdev);

	for_all_vfs(adapter, vf_cfg, vf) {
		/* BEx removes the pmac entry; newer chips clear the MAC
		 * on the VF interface directly
		 */
		if (BEx_chip(adapter))
			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
					vf_cfg->pmac_id, vf + 1);
		else
			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
				       vf + 1);

		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
	}
done:
	kfree(adapter->vf_cfg);
	adapter->num_vfs = 0;
	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
}
3092
/* Destroy all NIC queues. The event queues are destroyed last — order is
 * deliberate (the other queues presumably reference the EQs; do not
 * reorder without confirming against the queue-create path).
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3100
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303101static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003102{
Sathya Perla191eb752012-02-23 18:50:13 +00003103 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3104 cancel_delayed_work_sync(&adapter->work);
3105 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3106 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303107}
3108
Somnath Koturb05004a2013-12-05 12:08:16 +05303109static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303110{
3111 int i;
3112
Somnath Koturb05004a2013-12-05 12:08:16 +05303113 if (adapter->pmac_id) {
3114 for (i = 0; i < (adapter->uc_macs + 1); i++)
3115 be_cmd_pmac_del(adapter, adapter->if_handle,
3116 adapter->pmac_id[i], 0);
3117 adapter->uc_macs = 0;
3118
3119 kfree(adapter->pmac_id);
3120 adapter->pmac_id = NULL;
3121 }
3122}
3123
#ifdef CONFIG_BE2NET_VXLAN
/* Turn off all VxLAN offload state: convert the tunnel interface back to
 * normal mode in FW, clear the programmed VxLAN port, and withdraw the
 * UDP-tunnel segmentation features from the netdev.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;

	netdev->hw_enc_features = 0;
	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303144
/* Undo be_setup(): stop the worker, tear down SR-IOV, rebalance FW VF
 * resources, disable VxLAN offloads, remove MACs, destroy the interface
 * and all queues, and release MSI-X. Clears BE_FLAGS_SETUP_DONE.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3173
Sathya Perla4c876612013-02-03 20:30:11 +00003174static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003175{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303176 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003177 struct be_vf_cfg *vf_cfg;
3178 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003179 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003180
Sathya Perla4c876612013-02-03 20:30:11 +00003181 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3182 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003183
Sathya Perla4c876612013-02-03 20:30:11 +00003184 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303185 if (!BE3_chip(adapter)) {
3186 status = be_cmd_get_profile_config(adapter, &res,
3187 vf + 1);
3188 if (!status)
3189 cap_flags = res.if_cap_flags;
3190 }
Sathya Perla4c876612013-02-03 20:30:11 +00003191
3192 /* If a FW profile exists, then cap_flags are updated */
3193 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
Sathya Perla748b5392014-05-09 13:29:13 +05303194 BE_IF_FLAGS_BROADCAST |
3195 BE_IF_FLAGS_MULTICAST);
3196 status =
3197 be_cmd_if_create(adapter, cap_flags, en_flags,
3198 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003199 if (status)
3200 goto err;
3201 }
3202err:
3203 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003204}
3205
Sathya Perla39f1d942012-05-08 19:41:24 +00003206static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003207{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003208 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003209 int vf;
3210
Sathya Perla39f1d942012-05-08 19:41:24 +00003211 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3212 GFP_KERNEL);
3213 if (!adapter->vf_cfg)
3214 return -ENOMEM;
3215
Sathya Perla11ac75e2011-12-13 00:58:50 +00003216 for_all_vfs(adapter, vf_cfg, vf) {
3217 vf_cfg->if_handle = -1;
3218 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003219 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003220 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003221}
3222
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003223static int be_vf_setup(struct be_adapter *adapter)
3224{
Sathya Perla4c876612013-02-03 20:30:11 +00003225 struct device *dev = &adapter->pdev->dev;
Somnath Koturc5022242014-03-03 14:24:20 +05303226 struct be_vf_cfg *vf_cfg;
3227 int status, old_vfs, vf;
Sathya Perla04a06022013-07-23 15:25:00 +05303228 u32 privileges;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003229
Sathya Perla257a3fe2013-06-14 15:54:51 +05303230 old_vfs = pci_num_vf(adapter->pdev);
Sathya Perla39f1d942012-05-08 19:41:24 +00003231
3232 status = be_vf_setup_init(adapter);
3233 if (status)
3234 goto err;
Sathya Perla30128032011-11-10 19:17:57 +00003235
Sathya Perla4c876612013-02-03 20:30:11 +00003236 if (old_vfs) {
3237 for_all_vfs(adapter, vf_cfg, vf) {
3238 status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3239 if (status)
3240 goto err;
3241 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003242
Sathya Perla4c876612013-02-03 20:30:11 +00003243 status = be_vfs_mac_query(adapter);
3244 if (status)
3245 goto err;
3246 } else {
Vasundhara Volambec84e62014-06-30 13:01:32 +05303247 status = be_vfs_if_create(adapter);
3248 if (status)
3249 goto err;
3250
Sathya Perla39f1d942012-05-08 19:41:24 +00003251 status = be_vf_eth_addr_config(adapter);
3252 if (status)
3253 goto err;
3254 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003255
Sathya Perla11ac75e2011-12-13 00:58:50 +00003256 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla04a06022013-07-23 15:25:00 +05303257 /* Allow VFs to programs MAC/VLAN filters */
3258 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3259 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3260 status = be_cmd_set_fn_privileges(adapter,
3261 privileges |
3262 BE_PRIV_FILTMGMT,
3263 vf + 1);
3264 if (!status)
3265 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3266 vf);
3267 }
3268
Ravikumar Nelavelli0f77ba72014-05-30 19:06:24 +05303269 /* Allow full available bandwidth */
3270 if (!old_vfs)
3271 be_cmd_config_qos(adapter, 0, 0, vf + 1);
Ajit Khapardef1f3ee12012-03-18 06:23:41 +00003272
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303273 if (!old_vfs) {
Vasundhara Volam05998632013-10-01 15:59:59 +05303274 be_cmd_enable_vf(adapter, vf + 1);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303275 be_cmd_set_logical_link_config(adapter,
3276 IFLA_VF_LINK_STATE_AUTO,
3277 vf+1);
3278 }
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003279 }
Sathya Perlab4c1df92013-05-08 02:05:47 +00003280
3281 if (!old_vfs) {
3282 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3283 if (status) {
3284 dev_err(dev, "SRIOV enable failed\n");
3285 adapter->num_vfs = 0;
3286 goto err;
3287 }
3288 }
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303289
3290 adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003291 return 0;
3292err:
Sathya Perla4c876612013-02-03 20:30:11 +00003293 dev_err(dev, "VF setup failed\n");
3294 be_vf_clear(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003295 return status;
3296}
3297
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303298/* Converting function_mode bits on BE3 to SH mc_type enums */
3299
3300static u8 be_convert_mc_type(u32 function_mode)
3301{
Suresh Reddy66064db2014-06-23 16:41:29 +05303302 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303303 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303304 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303305 return FLEX10;
3306 else if (function_mode & VNIC_MODE)
3307 return vNIC2;
3308 else if (function_mode & UMC_ENABLED)
3309 return UMC;
3310 else
3311 return MC_NONE;
3312}
3313
Sathya Perla92bf14a2013-08-27 16:57:32 +05303314/* On BE2/BE3 FW does not suggest the supported limits */
3315static void BEx_get_resources(struct be_adapter *adapter,
3316 struct be_resources *res)
3317{
Vasundhara Volambec84e62014-06-30 13:01:32 +05303318 bool use_sriov = adapter->num_vfs ? 1 : 0;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303319
3320 if (be_physfn(adapter))
3321 res->max_uc_mac = BE_UC_PMAC_COUNT;
3322 else
3323 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3324
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303325 adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3326
3327 if (be_is_mc(adapter)) {
3328 /* Assuming that there are 4 channels per port,
3329 * when multi-channel is enabled
3330 */
3331 if (be_is_qnq_mode(adapter))
3332 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3333 else
3334 /* In a non-qnq multichannel mode, the pvid
3335 * takes up one vlan entry
3336 */
3337 res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3338 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303339 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303340 }
3341
Sathya Perla92bf14a2013-08-27 16:57:32 +05303342 res->max_mcast_mac = BE_MAX_MC;
3343
Vasundhara Volama5243da2014-03-11 18:53:07 +05303344 /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3345 * 2) Create multiple TX rings on a BE3-R multi-channel interface
3346 * *only* if it is RSS-capable.
3347 */
3348 if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
3349 !be_physfn(adapter) || (be_is_mc(adapter) &&
Suresh Reddya28277d2014-09-02 09:56:57 +05303350 !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303351 res->max_tx_qs = 1;
Suresh Reddya28277d2014-09-02 09:56:57 +05303352 } else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
3353 struct be_resources super_nic_res = {0};
3354
3355 /* On a SuperNIC profile, the driver needs to use the
3356 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
3357 */
3358 be_cmd_get_profile_config(adapter, &super_nic_res, 0);
3359 /* Some old versions of BE3 FW don't report max_tx_qs value */
3360 res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
3361 } else {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303362 res->max_tx_qs = BE3_MAX_TX_QS;
Suresh Reddya28277d2014-09-02 09:56:57 +05303363 }
Sathya Perla92bf14a2013-08-27 16:57:32 +05303364
3365 if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3366 !use_sriov && be_physfn(adapter))
3367 res->max_rss_qs = (adapter->be3_native) ?
3368 BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3369 res->max_rx_qs = res->max_rss_qs + 1;
3370
Suresh Reddye3dc8672014-01-06 13:02:25 +05303371 if (be_physfn(adapter))
Vasundhara Volamd3518e22014-07-17 16:20:29 +05303372 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
Suresh Reddye3dc8672014-01-06 13:02:25 +05303373 BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3374 else
3375 res->max_evt_qs = 1;
Sathya Perla92bf14a2013-08-27 16:57:32 +05303376
3377 res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3378 if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3379 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3380}
3381
Sathya Perla30128032011-11-10 19:17:57 +00003382static void be_setup_init(struct be_adapter *adapter)
3383{
3384 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003385 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003386 adapter->if_handle = -1;
3387 adapter->be3_native = false;
3388 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003389 if (be_physfn(adapter))
3390 adapter->cmd_privileges = MAX_PRIVILEGES;
3391 else
3392 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003393}
3394
Vasundhara Volambec84e62014-06-30 13:01:32 +05303395static int be_get_sriov_config(struct be_adapter *adapter)
3396{
3397 struct device *dev = &adapter->pdev->dev;
3398 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303399 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303400
3401 /* Some old versions of BE3 FW don't report max_vfs value */
Sathya Perlad3d18312014-08-01 17:47:30 +05303402 be_cmd_get_profile_config(adapter, &res, 0);
3403
Vasundhara Volambec84e62014-06-30 13:01:32 +05303404 if (BE3_chip(adapter) && !res.max_vfs) {
3405 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3406 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3407 }
3408
Sathya Perlad3d18312014-08-01 17:47:30 +05303409 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303410
3411 if (!be_max_vfs(adapter)) {
3412 if (num_vfs)
Vasundhara Volam50762662014-09-12 17:39:14 +05303413 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
Vasundhara Volambec84e62014-06-30 13:01:32 +05303414 adapter->num_vfs = 0;
3415 return 0;
3416 }
3417
Sathya Perlad3d18312014-08-01 17:47:30 +05303418 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3419
Vasundhara Volambec84e62014-06-30 13:01:32 +05303420 /* validate num_vfs module param */
3421 old_vfs = pci_num_vf(adapter->pdev);
3422 if (old_vfs) {
3423 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3424 if (old_vfs != num_vfs)
3425 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3426 adapter->num_vfs = old_vfs;
3427 } else {
3428 if (num_vfs > be_max_vfs(adapter)) {
3429 dev_info(dev, "Resources unavailable to init %d VFs\n",
3430 num_vfs);
3431 dev_info(dev, "Limiting to %d VFs\n",
3432 be_max_vfs(adapter));
3433 }
3434 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3435 }
3436
3437 return 0;
3438}
3439
Sathya Perla92bf14a2013-08-27 16:57:32 +05303440static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003441{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303442 struct device *dev = &adapter->pdev->dev;
3443 struct be_resources res = {0};
3444 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003445
Sathya Perla92bf14a2013-08-27 16:57:32 +05303446 if (BEx_chip(adapter)) {
3447 BEx_get_resources(adapter, &res);
3448 adapter->res = res;
3449 }
3450
Sathya Perla92bf14a2013-08-27 16:57:32 +05303451 /* For Lancer, SH etc read per-function resource limits from FW.
3452 * GET_FUNC_CONFIG returns per function guaranteed limits.
3453 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3454 */
Sathya Perla4c876612013-02-03 20:30:11 +00003455 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303456 status = be_cmd_get_func_config(adapter, &res);
3457 if (status)
3458 return status;
3459
3460 /* If RoCE may be enabled stash away half the EQs for RoCE */
3461 if (be_roce_supported(adapter))
3462 res.max_evt_qs /= 2;
3463 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003464 }
3465
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303466 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3467 be_max_txqs(adapter), be_max_rxqs(adapter),
3468 be_max_rss(adapter), be_max_eqs(adapter),
3469 be_max_vfs(adapter));
3470 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3471 be_max_uc(adapter), be_max_mc(adapter),
3472 be_max_vlans(adapter));
3473
Sathya Perla92bf14a2013-08-27 16:57:32 +05303474 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003475}
3476
Sathya Perlad3d18312014-08-01 17:47:30 +05303477static void be_sriov_config(struct be_adapter *adapter)
3478{
3479 struct device *dev = &adapter->pdev->dev;
3480 int status;
3481
3482 status = be_get_sriov_config(adapter);
3483 if (status) {
3484 dev_err(dev, "Failed to query SR-IOV configuration\n");
3485 dev_err(dev, "SR-IOV cannot be enabled\n");
3486 return;
3487 }
3488
3489 /* When the HW is in SRIOV capable configuration, the PF-pool
3490 * resources are equally distributed across the max-number of
3491 * VFs. The user may request only a subset of the max-vfs to be
3492 * enabled. Based on num_vfs, redistribute the resources across
3493 * num_vfs so that each VF will have access to more number of
3494 * resources. This facility is not available in BE3 FW.
3495 * Also, this is done by FW in Lancer chip.
3496 */
3497 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3498 status = be_cmd_set_sriov_config(adapter,
3499 adapter->pool_res,
3500 adapter->num_vfs);
3501 if (status)
3502 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3503 }
3504}
3505
Sathya Perla39f1d942012-05-08 19:41:24 +00003506static int be_get_config(struct be_adapter *adapter)
3507{
Vasundhara Volam542963b2014-01-15 13:23:33 +05303508 u16 profile_id;
Sathya Perla4c876612013-02-03 20:30:11 +00003509 int status;
Sathya Perla39f1d942012-05-08 19:41:24 +00003510
Kalesh APe97e3cd2014-07-17 16:20:26 +05303511 status = be_cmd_query_fw_cfg(adapter);
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003512 if (status)
Sathya Perla92bf14a2013-08-27 16:57:32 +05303513 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003514
Vasundhara Volam542963b2014-01-15 13:23:33 +05303515 if (be_physfn(adapter)) {
3516 status = be_cmd_get_active_profile(adapter, &profile_id);
3517 if (!status)
3518 dev_info(&adapter->pdev->dev,
3519 "Using profile 0x%x\n", profile_id);
Vasundhara Volam962bcb72014-07-17 16:20:30 +05303520 }
Vasundhara Volambec84e62014-06-30 13:01:32 +05303521
Sathya Perlad3d18312014-08-01 17:47:30 +05303522 if (!BE2_chip(adapter) && be_physfn(adapter))
3523 be_sriov_config(adapter);
Vasundhara Volam542963b2014-01-15 13:23:33 +05303524
Sathya Perla92bf14a2013-08-27 16:57:32 +05303525 status = be_get_resources(adapter);
3526 if (status)
3527 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003528
Ravikumar Nelavelli46ee9c12014-03-11 18:53:06 +05303529 adapter->pmac_id = kcalloc(be_max_uc(adapter),
3530 sizeof(*adapter->pmac_id), GFP_KERNEL);
Sathya Perla92bf14a2013-08-27 16:57:32 +05303531 if (!adapter->pmac_id)
3532 return -ENOMEM;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003533
Sathya Perla92bf14a2013-08-27 16:57:32 +05303534 /* Sanitize cfg_num_qs based on HW and platform limits */
3535 adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3536
3537 return 0;
Sathya Perla39f1d942012-05-08 19:41:24 +00003538}
3539
Sathya Perla95046b92013-07-23 15:25:02 +05303540static int be_mac_setup(struct be_adapter *adapter)
3541{
3542 u8 mac[ETH_ALEN];
3543 int status;
3544
3545 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3546 status = be_cmd_get_perm_mac(adapter, mac);
3547 if (status)
3548 return status;
3549
3550 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3551 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3552 } else {
3553 /* Maybe the HW was reset; dev_addr must be re-programmed */
3554 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3555 }
3556
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003557 /* For BE3-R VFs, the PF programs the initial MAC address */
3558 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3559 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3560 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303561 return 0;
3562}
3563
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303564static void be_schedule_worker(struct be_adapter *adapter)
3565{
3566 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3567 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3568}
3569
Sathya Perla77071332013-08-27 16:57:34 +05303570static int be_setup_queues(struct be_adapter *adapter)
3571{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303572 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303573 int status;
3574
3575 status = be_evt_queues_create(adapter);
3576 if (status)
3577 goto err;
3578
3579 status = be_tx_qs_create(adapter);
3580 if (status)
3581 goto err;
3582
3583 status = be_rx_cqs_create(adapter);
3584 if (status)
3585 goto err;
3586
3587 status = be_mcc_queues_create(adapter);
3588 if (status)
3589 goto err;
3590
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303591 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3592 if (status)
3593 goto err;
3594
3595 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3596 if (status)
3597 goto err;
3598
Sathya Perla77071332013-08-27 16:57:34 +05303599 return 0;
3600err:
3601 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3602 return status;
3603}
3604
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303605int be_update_queues(struct be_adapter *adapter)
3606{
3607 struct net_device *netdev = adapter->netdev;
3608 int status;
3609
3610 if (netif_running(netdev))
3611 be_close(netdev);
3612
3613 be_cancel_worker(adapter);
3614
3615 /* If any vectors have been shared with RoCE we cannot re-program
3616 * the MSIx table.
3617 */
3618 if (!adapter->num_msix_roce_vec)
3619 be_msix_disable(adapter);
3620
3621 be_clear_queues(adapter);
3622
3623 if (!msix_enabled(adapter)) {
3624 status = be_msix_enable(adapter);
3625 if (status)
3626 return status;
3627 }
3628
3629 status = be_setup_queues(adapter);
3630 if (status)
3631 return status;
3632
3633 be_schedule_worker(adapter);
3634
3635 if (netif_running(netdev))
3636 status = be_open(netdev);
3637
3638 return status;
3639}
3640
Sathya Perla5fb379e2009-06-18 00:02:59 +00003641static int be_setup(struct be_adapter *adapter)
3642{
Sathya Perla39f1d942012-05-08 19:41:24 +00003643 struct device *dev = &adapter->pdev->dev;
Sathya Perla77071332013-08-27 16:57:34 +05303644 u32 tx_fc, rx_fc, en_flags;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003645 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003646
Sathya Perla30128032011-11-10 19:17:57 +00003647 be_setup_init(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003648
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003649 if (!lancer_chip(adapter))
3650 be_cmd_req_native_mode(adapter);
Sathya Perla39f1d942012-05-08 19:41:24 +00003651
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003652 status = be_get_config(adapter);
3653 if (status)
3654 goto err;
Sathya Perla2dc1deb2011-07-19 19:52:33 +00003655
Somnath Koturc2bba3d2013-05-02 03:37:08 +00003656 status = be_msix_enable(adapter);
3657 if (status)
3658 goto err;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003659
Sathya Perla77071332013-08-27 16:57:34 +05303660 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3661 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3662 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3663 en_flags |= BE_IF_FLAGS_RSS;
3664 en_flags = en_flags & be_if_cap_flags(adapter);
3665 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3666 &adapter->if_handle, 0);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003667 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003668 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003669
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303670 /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3671 rtnl_lock();
Sathya Perla77071332013-08-27 16:57:34 +05303672 status = be_setup_queues(adapter);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303673 rtnl_unlock();
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003674 if (status)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003675 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003676
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003677 be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003678
Sathya Perla95046b92013-07-23 15:25:02 +05303679 status = be_mac_setup(adapter);
3680 if (status)
Padmanabh Ratnakar1578e772012-06-07 04:37:08 +00003681 goto err;
3682
Kalesh APe97e3cd2014-07-17 16:20:26 +05303683 be_cmd_get_fw_ver(adapter);
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303684 dev_info(dev, "FW version is %s\n", adapter->fw_ver);
Somnath Kotur5a56eb12011-09-30 07:24:28 +00003685
Somnath Koture9e2a902013-10-24 14:37:53 +05303686 if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
Vasundhara Volam50762662014-09-12 17:39:14 +05303687 dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
Somnath Koture9e2a902013-10-24 14:37:53 +05303688 adapter->fw_ver);
3689 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3690 }
3691
Sathya Perla1d1e9a42012-06-05 19:37:17 +00003692 if (adapter->vlans_added)
Sathya Perla10329df2012-06-05 19:37:18 +00003693 be_vid_config(adapter);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003694
3695 be_set_rx_mode(adapter->netdev);
3696
Suresh Reddy76a9e082014-01-15 13:23:40 +05303697 be_cmd_get_acpi_wol_cap(adapter);
3698
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003699 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003700
Ajit Khapardeddc3f5c2012-04-26 15:42:31 +00003701 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3702 be_cmd_set_flow_control(adapter, adapter->tx_fc,
Sathya Perlaa54769f2011-10-24 02:45:00 +00003703 adapter->rx_fc);
Sathya Perlaa54769f2011-10-24 02:45:00 +00003704
Suresh Reddybdce2ad2014-03-11 18:53:04 +05303705 if (be_physfn(adapter))
3706 be_cmd_set_logical_link_config(adapter,
3707 IFLA_VF_LINK_STATE_AUTO, 0);
3708
Vasundhara Volambec84e62014-06-30 13:01:32 +05303709 if (adapter->num_vfs)
3710 be_vf_setup(adapter);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003711
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003712 status = be_cmd_get_phy_info(adapter);
3713 if (!status && be_pause_supported(adapter))
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003714 adapter->phy.fc_autoneg = 1;
3715
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303716 be_schedule_worker(adapter);
Kalesh APe1ad8e32014-04-14 16:12:41 +05303717 adapter->flags |= BE_FLAGS_SETUP_DONE;
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003718 return 0;
Sathya Perlaa54769f2011-10-24 02:45:00 +00003719err:
3720 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003721 return status;
3722}
3723
Ivan Vecera66268732011-12-08 01:31:21 +00003724#ifdef CONFIG_NET_POLL_CONTROLLER
3725static void be_netpoll(struct net_device *netdev)
3726{
3727 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00003728 struct be_eq_obj *eqo;
Ivan Vecera66268732011-12-08 01:31:21 +00003729 int i;
3730
Sathya Perlae49cc342012-11-27 19:50:02 +00003731 for_all_evt_queues(adapter, eqo, i) {
3732 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3733 napi_schedule(&eqo->napi);
3734 }
Ivan Vecera66268732011-12-08 01:31:21 +00003735}
3736#endif
3737
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303738static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003739
Sathya Perla306f1342011-08-02 19:57:45 +00003740static bool phy_flashing_required(struct be_adapter *adapter)
3741{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003742 return (adapter->phy.phy_type == TN_8022 &&
3743 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003744}
3745
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003746static bool is_comp_in_ufi(struct be_adapter *adapter,
3747 struct flash_section_info *fsec, int type)
3748{
3749 int i = 0, img_type = 0;
3750 struct flash_section_info_g2 *fsec_g2 = NULL;
3751
Sathya Perlaca34fe32012-11-06 17:48:56 +00003752 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003753 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3754
3755 for (i = 0; i < MAX_FLASH_COMP; i++) {
3756 if (fsec_g2)
3757 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3758 else
3759 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3760
3761 if (img_type == type)
3762 return true;
3763 }
3764 return false;
3765
3766}
3767
Jingoo Han4188e7d2013-08-05 18:02:02 +09003768static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303769 int header_size,
3770 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003771{
3772 struct flash_section_info *fsec = NULL;
3773 const u8 *p = fw->data;
3774
3775 p += header_size;
3776 while (p < (fw->data + fw->size)) {
3777 fsec = (struct flash_section_info *)p;
3778 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3779 return fsec;
3780 p += 32;
3781 }
3782 return NULL;
3783}
3784
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303785static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3786 u32 img_offset, u32 img_size, int hdr_size,
3787 u16 img_optype, bool *crc_match)
3788{
3789 u32 crc_offset;
3790 int status;
3791 u8 crc[4];
3792
3793 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3794 if (status)
3795 return status;
3796
3797 crc_offset = hdr_size + img_offset + img_size - 4;
3798
3799 /* Skip flashing, if crc of flashed region matches */
3800 if (!memcmp(crc, p + crc_offset, 4))
3801 *crc_match = true;
3802 else
3803 *crc_match = false;
3804
3805 return status;
3806}
3807
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003808static int be_flash(struct be_adapter *adapter, const u8 *img,
Sathya Perla748b5392014-05-09 13:29:13 +05303809 struct be_dma_mem *flash_cmd, int optype, int img_size)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003810{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003811 struct be_cmd_write_flashrom *req = flash_cmd->va;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303812 u32 total_bytes, flash_op, num_bytes;
3813 int status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003814
3815 total_bytes = img_size;
3816 while (total_bytes) {
3817 num_bytes = min_t(u32, 32*1024, total_bytes);
3818
3819 total_bytes -= num_bytes;
3820
3821 if (!total_bytes) {
3822 if (optype == OPTYPE_PHY_FW)
3823 flash_op = FLASHROM_OPER_PHY_FLASH;
3824 else
3825 flash_op = FLASHROM_OPER_FLASH;
3826 } else {
3827 if (optype == OPTYPE_PHY_FW)
3828 flash_op = FLASHROM_OPER_PHY_SAVE;
3829 else
3830 flash_op = FLASHROM_OPER_SAVE;
3831 }
3832
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00003833 memcpy(req->data_buf, img, num_bytes);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003834 img += num_bytes;
3835 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303836 flash_op, num_bytes);
Kalesh AP4c600052014-05-30 19:06:26 +05303837 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303838 optype == OPTYPE_PHY_FW)
3839 break;
3840 else if (status)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003841 return status;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003842 }
3843 return 0;
3844}
3845
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003846/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003847static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303848 const struct firmware *fw,
3849 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003850{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003851 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303852 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003853 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303854 int status, i, filehdr_size, num_comp;
3855 const struct flash_comp *pflashcomp;
3856 bool crc_match;
3857 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003858
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003859 struct flash_comp gen3_flash_types[] = {
3860 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3861 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3862 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3863 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3864 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3865 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3866 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3867 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3868 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3869 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3870 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3871 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3872 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3873 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3874 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3875 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3876 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3877 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3878 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3879 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003880 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003881
3882 struct flash_comp gen2_flash_types[] = {
3883 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3884 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3885 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3886 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3887 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3888 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3889 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3890 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3891 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3892 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3893 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3894 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3895 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3896 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3897 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3898 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003899 };
3900
Sathya Perlaca34fe32012-11-06 17:48:56 +00003901 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003902 pflashcomp = gen3_flash_types;
3903 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003904 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003905 } else {
3906 pflashcomp = gen2_flash_types;
3907 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003908 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003909 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003910
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003911 /* Get flash section info*/
3912 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3913 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303914 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003915 return -1;
3916 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003917 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003918 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003919 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003920
3921 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3922 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3923 continue;
3924
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003925 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3926 !phy_flashing_required(adapter))
3927 continue;
3928
3929 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303930 status = be_check_flash_crc(adapter, fw->data,
3931 pflashcomp[i].offset,
3932 pflashcomp[i].size,
3933 filehdr_size +
3934 img_hdrs_size,
3935 OPTYPE_REDBOOT, &crc_match);
3936 if (status) {
3937 dev_err(dev,
3938 "Could not get CRC for 0x%x region\n",
3939 pflashcomp[i].optype);
3940 continue;
3941 }
3942
3943 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003944 continue;
3945 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003946
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303947 p = fw->data + filehdr_size + pflashcomp[i].offset +
3948 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003949 if (p + pflashcomp[i].size > fw->data + fw->size)
3950 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003951
3952 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303953 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003954 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303955 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003956 pflashcomp[i].img_type);
3957 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003958 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003959 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003960 return 0;
3961}
3962
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303963static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3964{
3965 u32 img_type = le32_to_cpu(fsec_entry.type);
3966 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3967
3968 if (img_optype != 0xFFFF)
3969 return img_optype;
3970
3971 switch (img_type) {
3972 case IMAGE_FIRMWARE_iSCSI:
3973 img_optype = OPTYPE_ISCSI_ACTIVE;
3974 break;
3975 case IMAGE_BOOT_CODE:
3976 img_optype = OPTYPE_REDBOOT;
3977 break;
3978 case IMAGE_OPTION_ROM_ISCSI:
3979 img_optype = OPTYPE_BIOS;
3980 break;
3981 case IMAGE_OPTION_ROM_PXE:
3982 img_optype = OPTYPE_PXE_BIOS;
3983 break;
3984 case IMAGE_OPTION_ROM_FCoE:
3985 img_optype = OPTYPE_FCOE_BIOS;
3986 break;
3987 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3988 img_optype = OPTYPE_ISCSI_BACKUP;
3989 break;
3990 case IMAGE_NCSI:
3991 img_optype = OPTYPE_NCSI_FW;
3992 break;
3993 case IMAGE_FLASHISM_JUMPVECTOR:
3994 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3995 break;
3996 case IMAGE_FIRMWARE_PHY:
3997 img_optype = OPTYPE_SH_PHY_FW;
3998 break;
3999 case IMAGE_REDBOOT_DIR:
4000 img_optype = OPTYPE_REDBOOT_DIR;
4001 break;
4002 case IMAGE_REDBOOT_CONFIG:
4003 img_optype = OPTYPE_REDBOOT_CONFIG;
4004 break;
4005 case IMAGE_UFI_DIR:
4006 img_optype = OPTYPE_UFI_DIR;
4007 break;
4008 default:
4009 break;
4010 }
4011
4012 return img_optype;
4013}
4014
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004015static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304016 const struct firmware *fw,
4017 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004018{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004019 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304020 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004021 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304022 u32 img_offset, img_size, img_type;
4023 int status, i, filehdr_size;
4024 bool crc_match, old_fw_img;
4025 u16 img_optype;
4026 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004027
4028 filehdr_size = sizeof(struct flash_file_hdr_g3);
4029 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4030 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304031 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304032 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004033 }
4034
4035 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4036 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4037 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304038 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4039 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4040 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004041
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304042 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004043 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304044 /* Don't bother verifying CRC if an old FW image is being
4045 * flashed
4046 */
4047 if (old_fw_img)
4048 goto flash;
4049
4050 status = be_check_flash_crc(adapter, fw->data, img_offset,
4051 img_size, filehdr_size +
4052 img_hdrs_size, img_optype,
4053 &crc_match);
4054 /* The current FW image on the card does not recognize the new
4055 * FLASH op_type. The FW download is partially complete.
4056 * Reboot the server now to enable FW image to recognize the
4057 * new FLASH op_type. To complete the remaining process,
4058 * download the same FW again after the reboot.
4059 */
Kalesh AP4c600052014-05-30 19:06:26 +05304060 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4061 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304062 dev_err(dev, "Flash incomplete. Reset the server\n");
4063 dev_err(dev, "Download FW image again after reset\n");
4064 return -EAGAIN;
4065 } else if (status) {
4066 dev_err(dev, "Could not get CRC for 0x%x region\n",
4067 img_optype);
4068 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004069 }
4070
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304071 if (crc_match)
4072 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004073
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304074flash:
4075 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004076 if (p + img_size > fw->data + fw->size)
4077 return -1;
4078
4079 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304080 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4081 * UFI_DIR region
4082 */
Kalesh AP4c600052014-05-30 19:06:26 +05304083 if (old_fw_img &&
4084 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4085 (img_optype == OPTYPE_UFI_DIR &&
4086 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304087 continue;
4088 } else if (status) {
4089 dev_err(dev, "Flashing section type 0x%x failed\n",
4090 img_type);
4091 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004092 }
4093 }
4094 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004095}
4096
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004097static int lancer_fw_download(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304098 const struct firmware *fw)
Ajit Khaparde84517482009-09-04 03:12:16 +00004099{
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004100#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
4101#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
Kalesh APbb864e02014-09-02 09:56:51 +05304102 struct device *dev = &adapter->pdev->dev;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004103 struct be_dma_mem flash_cmd;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004104 const u8 *data_ptr = NULL;
4105 u8 *dest_image_ptr = NULL;
4106 size_t image_size = 0;
4107 u32 chunk_size = 0;
4108 u32 data_written = 0;
4109 u32 offset = 0;
4110 int status = 0;
4111 u8 add_status = 0;
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004112 u8 change_status;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004113
4114 if (!IS_ALIGNED(fw->size, sizeof(u32))) {
Kalesh APbb864e02014-09-02 09:56:51 +05304115 dev_err(dev, "FW image size should be multiple of 4\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304116 return -EINVAL;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004117 }
4118
4119 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4120 + LANCER_FW_DOWNLOAD_CHUNK;
Kalesh APbb864e02014-09-02 09:56:51 +05304121 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
Joe Perchesd0320f72013-03-14 13:07:21 +00004122 &flash_cmd.dma, GFP_KERNEL);
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304123 if (!flash_cmd.va)
4124 return -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004125
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004126 dest_image_ptr = flash_cmd.va +
4127 sizeof(struct lancer_cmd_req_write_object);
4128 image_size = fw->size;
4129 data_ptr = fw->data;
4130
4131 while (image_size) {
4132 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4133
4134 /* Copy the image chunk content. */
4135 memcpy(dest_image_ptr, data_ptr, chunk_size);
4136
4137 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004138 chunk_size, offset,
4139 LANCER_FW_DOWNLOAD_LOCATION,
4140 &data_written, &change_status,
4141 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004142 if (status)
4143 break;
4144
4145 offset += data_written;
4146 data_ptr += data_written;
4147 image_size -= data_written;
4148 }
4149
4150 if (!status) {
4151 /* Commit the FW written */
4152 status = lancer_cmd_write_object(adapter, &flash_cmd,
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004153 0, offset,
4154 LANCER_FW_DOWNLOAD_LOCATION,
4155 &data_written, &change_status,
4156 &add_status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004157 }
4158
Kalesh APbb864e02014-09-02 09:56:51 +05304159 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004160 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304161 dev_err(dev, "Firmware load error\n");
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304162 return be_cmd_status(status);
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004163 }
4164
Kalesh APbb864e02014-09-02 09:56:51 +05304165 dev_info(dev, "Firmware flashed successfully\n");
4166
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004167 if (change_status == LANCER_FW_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304168 dev_info(dev, "Resetting adapter to activate new FW\n");
Somnath Kotur5c510812013-05-30 02:52:23 +00004169 status = lancer_physdev_ctrl(adapter,
4170 PHYSDEV_CONTROL_FW_RESET_MASK);
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004171 if (status) {
Kalesh APbb864e02014-09-02 09:56:51 +05304172 dev_err(dev, "Adapter busy, could not reset FW\n");
4173 dev_err(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004174 }
4175 } else if (change_status != LANCER_NO_RESET_NEEDED) {
Kalesh APbb864e02014-09-02 09:56:51 +05304176 dev_info(dev, "Reboot server to activate new FW\n");
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00004177 }
Kalesh AP3fb8cb82014-09-02 09:56:52 +05304178
4179 return 0;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004180}
4181
Sathya Perlaca34fe32012-11-06 17:48:56 +00004182#define UFI_TYPE2 2
4183#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004184#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004185#define UFI_TYPE4 4
4186static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004187 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004188{
Kalesh APddf11692014-07-17 16:20:28 +05304189 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004190 goto be_get_ufi_exit;
4191
Sathya Perlaca34fe32012-11-06 17:48:56 +00004192 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4193 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004194 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4195 if (fhdr->asic_type_rev == 0x10)
4196 return UFI_TYPE3R;
4197 else
4198 return UFI_TYPE3;
4199 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004200 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004201
4202be_get_ufi_exit:
4203 dev_err(&adapter->pdev->dev,
4204 "UFI and Interface are not compatible for flashing\n");
4205 return -1;
4206}
4207
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004208static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4209{
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004210 struct flash_file_hdr_g3 *fhdr3;
4211 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00004212 struct be_dma_mem flash_cmd;
Ajit Khaparde84517482009-09-04 03:12:16 +00004213 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004214 int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00004215
Padmanabh Ratnakarbe716442012-10-22 23:02:44 +00004216 flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004217 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4218 &flash_cmd.dma, GFP_KERNEL);
Ajit Khaparde84517482009-09-04 03:12:16 +00004219 if (!flash_cmd.va) {
4220 status = -ENOMEM;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004221 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004222 }
4223
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004224 p = fw->data;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004225 fhdr3 = (struct flash_file_hdr_g3 *)p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004226
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004227 ufi_type = be_get_ufi_type(adapter, fhdr3);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004228
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004229 num_imgs = le32_to_cpu(fhdr3->num_imgs);
4230 for (i = 0; i < num_imgs; i++) {
4231 img_hdr_ptr = (struct image_hdr *)(fw->data +
4232 (sizeof(struct flash_file_hdr_g3) +
4233 i * sizeof(struct image_hdr)));
4234 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004235 switch (ufi_type) {
4236 case UFI_TYPE4:
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004237 status = be_flash_skyhawk(adapter, fw,
Sathya Perla748b5392014-05-09 13:29:13 +05304238 &flash_cmd, num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004239 break;
4240 case UFI_TYPE3R:
Sathya Perlaca34fe32012-11-06 17:48:56 +00004241 status = be_flash_BEx(adapter, fw, &flash_cmd,
4242 num_imgs);
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004243 break;
4244 case UFI_TYPE3:
4245 /* Do not flash this ufi on BE3-R cards */
4246 if (adapter->asic_rev < 0x10)
4247 status = be_flash_BEx(adapter, fw,
4248 &flash_cmd,
4249 num_imgs);
4250 else {
Kalesh AP56ace3a2014-07-17 16:20:20 +05304251 status = -EINVAL;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004252 dev_err(&adapter->pdev->dev,
4253 "Can't load BE3 UFI on BE3R\n");
4254 }
4255 }
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004256 }
Ajit Khaparde84517482009-09-04 03:12:16 +00004257 }
4258
Sathya Perlaca34fe32012-11-06 17:48:56 +00004259 if (ufi_type == UFI_TYPE2)
4260 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004261 else if (ufi_type == -1)
Kalesh AP56ace3a2014-07-17 16:20:20 +05304262 status = -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004263
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004264 dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4265 flash_cmd.dma);
Ajit Khaparde84517482009-09-04 03:12:16 +00004266 if (status) {
4267 dev_err(&adapter->pdev->dev, "Firmware load error\n");
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004268 goto be_fw_exit;
Ajit Khaparde84517482009-09-04 03:12:16 +00004269 }
4270
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02004271 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00004272
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004273be_fw_exit:
4274 return status;
4275}
4276
4277int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4278{
4279 const struct firmware *fw;
4280 int status;
4281
4282 if (!netif_running(adapter->netdev)) {
4283 dev_err(&adapter->pdev->dev,
4284 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304285 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004286 }
4287
4288 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4289 if (status)
4290 goto fw_exit;
4291
4292 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4293
4294 if (lancer_chip(adapter))
4295 status = lancer_fw_download(adapter, fw);
4296 else
4297 status = be_fw_download(adapter, fw);
4298
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004299 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304300 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004301
Ajit Khaparde84517482009-09-04 03:12:16 +00004302fw_exit:
4303 release_firmware(fw);
4304 return status;
4305}
4306
Sathya Perla748b5392014-05-09 13:29:13 +05304307static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004308{
4309 struct be_adapter *adapter = netdev_priv(dev);
4310 struct nlattr *attr, *br_spec;
4311 int rem;
4312 int status = 0;
4313 u16 mode = 0;
4314
4315 if (!sriov_enabled(adapter))
4316 return -EOPNOTSUPP;
4317
4318 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
Thomas Graf4ea85e82014-11-26 13:42:18 +01004319 if (!br_spec)
4320 return -EINVAL;
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004321
4322 nla_for_each_nested(attr, br_spec, rem) {
4323 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4324 continue;
4325
Thomas Grafb7c1a312014-11-26 13:42:17 +01004326 if (nla_len(attr) < sizeof(mode))
4327 return -EINVAL;
4328
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004329 mode = nla_get_u16(attr);
4330 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4331 return -EINVAL;
4332
4333 status = be_cmd_set_hsw_config(adapter, 0, 0,
4334 adapter->if_handle,
4335 mode == BRIDGE_MODE_VEPA ?
4336 PORT_FWD_TYPE_VEPA :
4337 PORT_FWD_TYPE_VEB);
4338 if (status)
4339 goto err;
4340
4341 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4342 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4343
4344 return status;
4345 }
4346err:
4347 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4348 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4349
4350 return status;
4351}
4352
4353static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304354 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004355{
4356 struct be_adapter *adapter = netdev_priv(dev);
4357 int status = 0;
4358 u8 hsw_mode;
4359
4360 if (!sriov_enabled(adapter))
4361 return 0;
4362
4363 /* BE and Lancer chips support VEB mode only */
4364 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4365 hsw_mode = PORT_FWD_TYPE_VEB;
4366 } else {
4367 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4368 adapter->if_handle, &hsw_mode);
4369 if (status)
4370 return 0;
4371 }
4372
4373 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4374 hsw_mode == PORT_FWD_TYPE_VEPA ?
Scott Feldman2c3c0312014-11-28 14:34:25 +01004375 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
4376 0, 0);
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004377}
4378
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304379#ifdef CONFIG_BE2NET_VXLAN
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004380/* VxLAN offload Notes:
4381 *
4382 * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
4383 * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
4384 * is expected to work across all types of IP tunnels once exported. Skyhawk
4385 * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
4386 * offloads in hw_enc_features only when a VxLAN port is added. Note this only
4387 * ensures that other tunnels work fine while VxLAN offloads are not enabled.
4388 *
4389 * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
4390 * adds more than one port, disable offloads and don't re-enable them again
4391 * until after all the tunnels are removed.
4392 */
Sathya Perlac9c47142014-03-27 10:46:19 +05304393static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4394 __be16 port)
4395{
4396 struct be_adapter *adapter = netdev_priv(netdev);
4397 struct device *dev = &adapter->pdev->dev;
4398 int status;
4399
4400 if (lancer_chip(adapter) || BEx_chip(adapter))
4401 return;
4402
4403 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
Sathya Perlac9c47142014-03-27 10:46:19 +05304404 dev_info(dev,
4405 "Only one UDP port supported for VxLAN offloads\n");
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004406 dev_info(dev, "Disabling VxLAN offloads\n");
4407 adapter->vxlan_port_count++;
4408 goto err;
Sathya Perlac9c47142014-03-27 10:46:19 +05304409 }
4410
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004411 if (adapter->vxlan_port_count++ >= 1)
4412 return;
4413
Sathya Perlac9c47142014-03-27 10:46:19 +05304414 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4415 OP_CONVERT_NORMAL_TO_TUNNEL);
4416 if (status) {
4417 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4418 goto err;
4419 }
4420
4421 status = be_cmd_set_vxlan_port(adapter, port);
4422 if (status) {
4423 dev_warn(dev, "Failed to add VxLAN port\n");
4424 goto err;
4425 }
4426 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4427 adapter->vxlan_port = port;
4428
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004429 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4430 NETIF_F_TSO | NETIF_F_TSO6 |
4431 NETIF_F_GSO_UDP_TUNNEL;
4432 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatnaac9a3d82014-12-19 10:00:18 +05304433 netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004434
Sathya Perlac9c47142014-03-27 10:46:19 +05304435 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4436 be16_to_cpu(port));
4437 return;
4438err:
4439 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304440}
4441
4442static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4443 __be16 port)
4444{
4445 struct be_adapter *adapter = netdev_priv(netdev);
4446
4447 if (lancer_chip(adapter) || BEx_chip(adapter))
4448 return;
4449
4450 if (adapter->vxlan_port != port)
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004451 goto done;
Sathya Perlac9c47142014-03-27 10:46:19 +05304452
4453 be_disable_vxlan_offloads(adapter);
4454
4455 dev_info(&adapter->pdev->dev,
4456 "Disabled VxLAN offloads for UDP port %d\n",
4457 be16_to_cpu(port));
Sriharsha Basavapatna630f4b72014-12-11 03:24:47 -05004458done:
4459 adapter->vxlan_port_count--;
Sathya Perlac9c47142014-03-27 10:46:19 +05304460}
Joe Stringer725d5482014-11-13 16:38:13 -08004461
Jesse Gross5f352272014-12-23 22:37:26 -08004462static netdev_features_t be_features_check(struct sk_buff *skb,
4463 struct net_device *dev,
4464 netdev_features_t features)
Joe Stringer725d5482014-11-13 16:38:13 -08004465{
Jesse Gross5f352272014-12-23 22:37:26 -08004466 return vxlan_features_check(skb, features);
Joe Stringer725d5482014-11-13 16:38:13 -08004467}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304468#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304469
stephen hemmingere5686ad2012-01-05 19:10:25 +00004470static const struct net_device_ops be_netdev_ops = {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004471 .ndo_open = be_open,
4472 .ndo_stop = be_close,
4473 .ndo_start_xmit = be_xmit,
Sathya Perlaa54769f2011-10-24 02:45:00 +00004474 .ndo_set_rx_mode = be_set_rx_mode,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004475 .ndo_set_mac_address = be_mac_addr_set,
4476 .ndo_change_mtu = be_change_mtu,
Sathya Perlaab1594e2011-07-25 19:10:15 +00004477 .ndo_get_stats64 = be_get_stats64,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004478 .ndo_validate_addr = eth_validate_addr,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004479 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
4480 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00004481 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00004482 .ndo_set_vf_vlan = be_set_vf_vlan,
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04004483 .ndo_set_vf_rate = be_set_vf_tx_rate,
Ivan Vecera66268732011-12-08 01:31:21 +00004484 .ndo_get_vf_config = be_get_vf_config,
Suresh Reddybdce2ad2014-03-11 18:53:04 +05304485 .ndo_set_vf_link_state = be_set_vf_link_state,
Ivan Vecera66268732011-12-08 01:31:21 +00004486#ifdef CONFIG_NET_POLL_CONTROLLER
4487 .ndo_poll_controller = be_netpoll,
4488#endif
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004489 .ndo_bridge_setlink = be_ndo_bridge_setlink,
4490 .ndo_bridge_getlink = be_ndo_bridge_getlink,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304491#ifdef CONFIG_NET_RX_BUSY_POLL
Sathya Perlac9c47142014-03-27 10:46:19 +05304492 .ndo_busy_poll = be_busy_poll,
Sathya Perla6384a4d2013-10-25 10:40:16 +05304493#endif
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304494#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304495 .ndo_add_vxlan_port = be_add_vxlan_port,
4496 .ndo_del_vxlan_port = be_del_vxlan_port,
Jesse Gross5f352272014-12-23 22:37:26 -08004497 .ndo_features_check = be_features_check,
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304498#endif
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004499};
4500
/* One-time netdev setup: advertise offload feature flags, install the
 * netdev and ethtool op tables. Called during probe before register.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Active features start as everything toggleable plus RX VLAN
	 * handling (hw_features must be populated first).
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4527
4528static void be_unmap_pci_bars(struct be_adapter *adapter)
4529{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004530 if (adapter->csr)
4531 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004532 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004533 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004534}
4535
Sathya Perlace66f782012-11-06 17:48:58 +00004536static int db_bar(struct be_adapter *adapter)
4537{
4538 if (lancer_chip(adapter) || !be_physfn(adapter))
4539 return 0;
4540 else
4541 return 4;
4542}
4543
4544static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004545{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004546 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004547 adapter->roce_db.size = 4096;
4548 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4549 db_bar(adapter));
4550 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4551 db_bar(adapter));
4552 }
Parav Pandit045508a2012-03-26 14:27:13 +00004553 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004554}
4555
4556static int be_map_pci_bars(struct be_adapter *adapter)
4557{
4558 u8 __iomem *addr;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00004559
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004560 if (BEx_chip(adapter) && be_physfn(adapter)) {
4561 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
Kalesh APddf11692014-07-17 16:20:28 +05304562 if (!adapter->csr)
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004563 return -ENOMEM;
4564 }
4565
Sathya Perlace66f782012-11-06 17:48:58 +00004566 addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
Kalesh APddf11692014-07-17 16:20:28 +05304567 if (!addr)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004568 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00004569 adapter->db = addr;
Sathya Perlace66f782012-11-06 17:48:58 +00004570
4571 be_roce_map_pci_bars(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004572 return 0;
Sathya Perlace66f782012-11-06 17:48:58 +00004573
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004574pci_map_err:
Sathya Perlaacbafeb2014-09-02 09:56:46 +05304575 dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004576 be_unmap_pci_bars(adapter);
4577 return -ENOMEM;
4578}
4579
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004580static void be_ctrl_cleanup(struct be_adapter *adapter)
4581{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004582 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004583
4584 be_unmap_pci_bars(adapter);
4585
4586 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004587 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4588 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004589
Sathya Perla5b8821b2011-08-02 19:57:44 +00004590 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004591 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004592 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4593 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004594}
4595
/* One-time control-path init: reads the SLI interface register to learn
 * the chip family and whether this is a VF, maps the PCI BARs, allocates
 * the 16-byte-aligned mailbox and the RX-filter DMA buffers, and
 * initializes the mbox/MCC locks. Unwinds via gotos on failure; returns
 * 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	/* Identify SLI family and PF/VF role from PCI config space */
	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be aligned below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* mbox_mem is the 16-byte-aligned view into the raw allocation */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	/* Pre-zeroed buffer reused for RX_FILTER commands */
	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4654
4655static void be_stats_cleanup(struct be_adapter *adapter)
4656{
Sathya Perla3abcded2010-10-03 22:12:27 -07004657 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004658
4659 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004660 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4661 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004662}
4663
4664static int be_stats_init(struct be_adapter *adapter)
4665{
Sathya Perla3abcded2010-10-03 22:12:27 -07004666 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004667
Sathya Perlaca34fe32012-11-06 17:48:56 +00004668 if (lancer_chip(adapter))
4669 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4670 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004671 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004672 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004673 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004674 else
4675 /* ALL non-BE ASICs */
4676 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004677
Joe Perchesede23fa2013-08-26 22:45:23 -07004678 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4679 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304680 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304681 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004682 return 0;
4683}
4684
/* PCI remove callback: tears down everything be_probe() set up, in
 * reverse order. Tolerates a NULL drvdata (probe never completed).
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* Counterpart of the be_intr_set(adapter, true) done in be_probe() */
	be_intr_set(adapter, false);

	/* Stop the recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4715
Sathya Perla39f1d942012-05-08 19:41:24 +00004716static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004717{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304718 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004719
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004720 status = be_cmd_get_cntl_attributes(adapter);
4721 if (status)
4722 return status;
4723
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004724 /* Must be a power of 2 or else MODULO will BUG_ON */
4725 adapter->be_get_temp_freq = 64;
4726
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304727 if (BEx_chip(adapter)) {
4728 level = be_cmd_get_fw_log_level(adapter);
4729 adapter->msg_enable =
4730 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4731 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004732
Sathya Perla92bf14a2013-08-27 16:57:32 +05304733 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004734 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004735}
4736
/* Recover a Lancer adapter after an error: wait for the chip to report
 * ready, tear down and rebuild all resources, and re-open the interface
 * if it was running. Returns 0 on success; -EAGAIN means firmware is
 * still provisioning resources and the caller may retry.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	/* Clear sticky error state before re-initializing */
	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4773
/* Delayed-work handler (re-armed every second) that polls for hardware
 * errors and, on Lancer chips, attempts in-place recovery via
 * lancer_recover_func().
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		/* Detach under rtnl so the stack stops using the device
		 * while we recover it
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4799
/* Periodic (1 Hz, self-rescheduling) housekeeping worker: reaps MCC
 * completions while the interface is down, issues stats and die-
 * temperature queries, replenishes starved RX queues and updates the
 * adaptive EQ delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Don't issue a new stats cmd while one appears outstanding
	 * (stats_cmd_sent flag)
	 */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Die temperature is polled on the PF only, once every
	 * be_get_temp_freq iterations
	 */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4842
Sathya Perla257a3fe2013-06-14 15:54:51 +05304843/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004844static bool be_reset_required(struct be_adapter *adapter)
4845{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304846 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004847}
4848
Sathya Perlad3791422012-09-28 04:39:44 +00004849static char *mc_name(struct be_adapter *adapter)
4850{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304851 char *str = ""; /* default */
4852
4853 switch (adapter->mc_type) {
4854 case UMC:
4855 str = "UMC";
4856 break;
4857 case FLEX10:
4858 str = "FLEX10";
4859 break;
4860 case vNIC1:
4861 str = "vNIC-1";
4862 break;
4863 case nPAR:
4864 str = "nPAR";
4865 break;
4866 case UFP:
4867 str = "UFP";
4868 break;
4869 case vNIC2:
4870 str = "vNIC-2";
4871 break;
4872 default:
4873 str = "";
4874 }
4875
4876 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004877}
4878
/* "PF" or "VF", for log messages */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4883
/* PCI probe callback: enables the device, sets up DMA masks, allocates
 * the netdev/adapter, brings up the control path and firmware, creates
 * all queues via be_setup(), registers the netdev and starts the
 * recovery worker. Each failure unwinds exactly what succeeded so far
 * through the goto ladder at the bottom. Returns 0 or a negative errno.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer 64-bit DMA; fall back to 32-bit if unavailable */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort; probe continues even if it fails */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* FLR the function unless VFs are already enabled (see
	 * be_reset_required())
	 */
	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	/* Flow control defaults to on in both directions */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
5006
/* Legacy PM suspend callback: optionally arms wake-on-LAN, quiesces the
 * device (interrupts off, recovery worker stopped, interface closed,
 * resources destroyed) and drops it into the requested power state.
 * Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
5031
/* Legacy PM resume callback: re-enables the device, waits for firmware,
 * rebuilds resources, re-opens the interface if it was up, restarts the
 * recovery worker and disarms wake-on-LAN.
 */
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	/* NOTE(review): be_setup()'s return value is ignored here, unlike
	 * every other call site in this file — if it fails we still
	 * be_open() and return 0. Looks like an oversight; confirm.
	 */
	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}
5073
/*
 * An FLR will stop BE from DMAing any data.
 */
/* PCI shutdown callback: stops the workers, detaches the netdev and
 * resets the function so the hardware stops DMA before the system goes
 * down. Tolerates a NULL drvdata.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset halts any in-flight DMA (see comment above) */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5094
/* EEH/AER error_detected callback: on the first report, quiesce the
 * driver (stop recovery worker, close and detach the netdev, free
 * resources). Returns DISCONNECT for permanent failures, otherwise
 * disables the device and asks the PCI core for a slot reset.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Quiesce only once even if the callback fires repeatedly */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5133
/* EEH/AER slot_reset callback: re-enables the device after the slot
 * reset, restores config space and waits for firmware readiness.
 * Returns RECOVERED on success, DISCONNECT otherwise.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear recorded AER status and the driver's sticky error flags */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5160
/* EEH/AER resume callback: final recovery step after be_eeh_reset().
 * Resets and re-initializes the function, rebuilds resources, re-opens
 * the interface and restarts the recovery worker. Failures are only
 * logged — there is no further recovery path from here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5203
/* PCI error-recovery (EEH/AER) callbacks wired into be_driver below */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5209
/* PCI driver descriptor: binds the entry points defined in this file to
 * the device IDs in be_dev_ids.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5220
5221static int __init be_init_module(void)
5222{
Joe Perches8e95a202009-12-03 07:58:21 +00005223 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5224 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005225 printk(KERN_WARNING DRV_NAME
5226 " : Module param rx_frag_size must be 2048/4096/8192."
5227 " Using 2048\n");
5228 rx_frag_size = 2048;
5229 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005230
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005231 return pci_register_driver(&be_driver);
5232}
5233module_init(be_init_module);
5234
/* Module exit point: unregisters the PCI driver, which invokes
 * be_remove() for every bound device.
 */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);