blob: fbab943ec757d9b1735f52a5059ac8ab59bc0237 [file] [log] [blame]
/*
 * Copyright (C) 2005 - 2014 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Paul Gortmaker9d9779e2011-07-03 15:21:01 -040019#include <linux/module.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070020#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000021#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070022#include <asm/div64.h>
Sathya Perlad6b6d982012-09-05 01:56:48 +000023#include <linux/aer.h>
Ajit Khapardea77dcb82013-08-30 15:01:16 -050024#include <linux/if_bridge.h>
Sathya Perla6384a4d2013-10-25 10:40:16 +053025#include <net/busy_poll.h>
Sathya Perlac9c47142014-03-27 10:46:19 +053026#include <net/vxlan.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070027
28MODULE_VERSION(DRV_VER);
29MODULE_DEVICE_TABLE(pci, be_dev_ids);
30MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
Sarveshwar Bandi00d3d512013-01-28 04:17:01 +000031MODULE_AUTHOR("Emulex Corporation");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_LICENSE("GPL");
33
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034static unsigned int num_vfs;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000035module_param(num_vfs, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000036MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070037
Sathya Perla11ac75e2011-12-13 00:58:50 +000038static ushort rx_frag_size = 2048;
39module_param(rx_frag_size, ushort, S_IRUGO);
40MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
Benoit Taine9baa3c32014-08-08 15:56:03 +020042static const struct pci_device_id be_dev_ids[] = {
Ajit Khapardec4ca2372009-05-18 15:38:55 -070043 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
Ajit Khaparde59fd5d82009-10-29 01:11:06 -070044 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
Ajit Khapardec4ca2372009-05-18 15:38:55 -070045 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
Sathya Perlafe6d2a32010-11-21 23:25:50 +000047 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +000048 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
Ajit Khapardeecedb6a2011-12-15 06:31:38 +000049 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
Padmanabh Ratnakar76b73532012-10-20 06:04:40 +000050 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
Sathya Perla6b7c5b92009-03-11 23:32:03 -070051 { 0 }
52};
53MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* Names of the bits in the UE (Unrecoverable Error) Status Low CSR;
 * entry i labels bit i. String contents (including trailing spaces)
 * are preserved exactly as the driver has always reported them.
 */
static const char * const ue_status_low_desc[] = {
	"CEV", "CTX", "DBUF", "ERX",
	"Host", "MPU", "NDMA", "PTC ",
	"RDMA ", "RXF ", "RXIPS ", "RXULP0 ",
	"RXULP1 ", "RXULP2 ", "TIM ", "TPOST ",
	"TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ",
	"UC ", "WDMA ", "TXULP2 ", "HOST1 ",
	"P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ",
	"ERX2 ", "SPARE ", "JTAG ", "MPU_INTPEND "
};
89/* UE Status High CSR */
/* Names of the bits in the UE Status High CSR; entry i labels bit i.
 * String contents are preserved byte-for-byte.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM",
	"PCS1ONLINE", "PCTL0", "PCTL1", "PMEM",
	"RR", "TXPB", "RXPP", "XAUI",
	"TXP", "ARM", "IPC", "HOST2",
	"HOST3", "HOST4", "HOST5", "HOST6",
	"HOST7", "ECRC", "Poison TLP", "NETC",
	"PERIPH", "LLTXULP", "D2P", "RCON",
	"LDMA", "LLTXP", "LLTXPB",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700124
Sathya Perla752961a2011-10-24 02:45:03 +0000125
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700126static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127{
128 struct be_dma_mem *mem = &q->dma_mem;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530129
Sathya Perla1cfafab2012-02-23 18:50:15 +0000130 if (mem->va) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000131 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 mem->dma);
Sathya Perla1cfafab2012-02-23 18:50:15 +0000133 mem->va = NULL;
134 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700135}
136
137static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
Sathya Perla748b5392014-05-09 13:29:13 +0530138 u16 len, u16 entry_size)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700139{
140 struct be_dma_mem *mem = &q->dma_mem;
141
142 memset(q, 0, sizeof(*q));
143 q->len = len;
144 q->entry_size = entry_size;
145 mem->size = len * entry_size;
Joe Perchesede23fa2013-08-26 22:45:23 -0700146 mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
147 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700148 if (!mem->va)
Sathya Perla10ef9ab2012-02-09 18:05:27 +0000149 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700150 return 0;
151}
152
Somnath Kotur68c45a22013-03-14 02:42:07 +0000153static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700154{
Sathya Perladb3ea782011-08-22 19:41:52 +0000155 u32 reg, enabled;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000156
Sathya Perladb3ea782011-08-22 19:41:52 +0000157 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
Sathya Perla748b5392014-05-09 13:29:13 +0530158 &reg);
Sathya Perladb3ea782011-08-22 19:41:52 +0000159 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
160
Sathya Perla5f0b8492009-07-27 22:52:56 +0000161 if (!enabled && enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700162 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000163 else if (enabled && !enable)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700164 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000165 else
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166 return;
Sathya Perla5f0b8492009-07-27 22:52:56 +0000167
Sathya Perladb3ea782011-08-22 19:41:52 +0000168 pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530169 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700170}
171
Somnath Kotur68c45a22013-03-14 02:42:07 +0000172static void be_intr_set(struct be_adapter *adapter, bool enable)
173{
174 int status = 0;
175
176 /* On lancer interrupts can't be controlled via this register */
177 if (lancer_chip(adapter))
178 return;
179
180 if (adapter->eeh_error)
181 return;
182
183 status = be_cmd_intr_set(adapter, enable);
184 if (status)
185 be_reg_intr_set(adapter, enable);
186}
187
Sathya Perla8788fdc2009-07-27 22:52:03 +0000188static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700189{
190 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530191
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700192 val |= qid & DB_RQ_RING_ID_MASK;
193 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000194
195 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000196 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700197}
198
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000199static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
200 u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700201{
202 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530203
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000204 val |= txo->q.id & DB_TXULP_RING_ID_MASK;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700205 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000206
207 wmb();
Vasundhara Volam94d73aa2013-04-21 23:28:14 +0000208 iowrite32(val, adapter->db + txo->db_offset);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700209}
210
Sathya Perla8788fdc2009-07-27 22:52:03 +0000211static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla748b5392014-05-09 13:29:13 +0530212 bool arm, bool clear_int, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700213{
214 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530215
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700216 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perla748b5392014-05-09 13:29:13 +0530217 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000218
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000219 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000220 return;
221
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700222 if (arm)
223 val |= 1 << DB_EQ_REARM_SHIFT;
224 if (clear_int)
225 val |= 1 << DB_EQ_CLR_SHIFT;
226 val |= 1 << DB_EQ_EVNT_SHIFT;
227 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000228 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700229}
230
Sathya Perla8788fdc2009-07-27 22:52:03 +0000231void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700232{
233 u32 val = 0;
Kalesh AP03d28ff2014-09-19 15:46:56 +0530234
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700235 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000236 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
237 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000238
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +0000239 if (adapter->eeh_error)
Sathya Perlacf588472010-02-14 21:22:01 +0000240 return;
241
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700242 if (arm)
243 val |= 1 << DB_CQ_REARM_SHIFT;
244 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000245 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700246}
247
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700248static int be_mac_addr_set(struct net_device *netdev, void *p)
249{
250 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla5a712c12013-07-23 15:24:59 +0530251 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700252 struct sockaddr *addr = p;
Sathya Perla5a712c12013-07-23 15:24:59 +0530253 int status;
254 u8 mac[ETH_ALEN];
255 u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700256
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000257 if (!is_valid_ether_addr(addr->sa_data))
258 return -EADDRNOTAVAIL;
259
Vasundhara Volamff32f8a2014-01-15 13:23:35 +0530260 /* Proceed further only if, User provided MAC is different
261 * from active MAC
262 */
263 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
264 return 0;
265
Sathya Perla5a712c12013-07-23 15:24:59 +0530266 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
267 * privilege or if PF did not provision the new MAC address.
268 * On BE3, this cmd will always fail if the VF doesn't have the
269 * FILTMGMT privilege. This failure is OK, only if the PF programmed
270 * the MAC for the VF.
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000271 */
Sathya Perla5a712c12013-07-23 15:24:59 +0530272 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
273 adapter->if_handle, &adapter->pmac_id[0], 0);
274 if (!status) {
275 curr_pmac_id = adapter->pmac_id[0];
276
277 /* Delete the old programmed MAC. This call may fail if the
278 * old MAC was already deleted by the PF driver.
279 */
280 if (adapter->pmac_id[0] != old_pmac_id)
281 be_cmd_pmac_del(adapter, adapter->if_handle,
282 old_pmac_id, 0);
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000283 }
284
Sathya Perla5a712c12013-07-23 15:24:59 +0530285 /* Decide if the new MAC is successfully activated only after
286 * querying the FW
Padmanabh Ratnakar704e4c82012-10-20 06:02:13 +0000287 */
Suresh Reddyb188f092014-01-15 13:23:39 +0530288 status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
289 adapter->if_handle, true, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000290 if (status)
Somnath Koture3a7ae22011-10-27 07:14:05 +0000291 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700292
Sathya Perla5a712c12013-07-23 15:24:59 +0530293 /* The MAC change did not happen, either due to lack of privilege
294 * or PF didn't pre-provision.
295 */
dingtianhong61d23e92013-12-30 15:40:43 +0800296 if (!ether_addr_equal(addr->sa_data, mac)) {
Sathya Perla5a712c12013-07-23 15:24:59 +0530297 status = -EPERM;
298 goto err;
299 }
300
Somnath Koture3a7ae22011-10-27 07:14:05 +0000301 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
Sathya Perla5a712c12013-07-23 15:24:59 +0530302 dev_info(dev, "MAC address changed to %pM\n", mac);
Somnath Koture3a7ae22011-10-27 07:14:05 +0000303 return 0;
304err:
Sathya Perla5a712c12013-07-23 15:24:59 +0530305 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700306 return status;
307}
308
Sathya Perlaca34fe32012-11-06 17:48:56 +0000309/* BE2 supports only v0 cmd */
310static void *hw_stats_from_cmd(struct be_adapter *adapter)
311{
312 if (BE2_chip(adapter)) {
313 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
314
315 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500316 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000317 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
318
319 return &cmd->hw_stats;
Ajit Khaparde61000862013-10-03 16:16:33 -0500320 } else {
321 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
322
323 return &cmd->hw_stats;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000324 }
325}
326
327/* BE2 supports only v0 cmd */
328static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
329{
330 if (BE2_chip(adapter)) {
331 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332
333 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500334 } else if (BE3_chip(adapter)) {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000335 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
336
337 return &hw_stats->erx;
Ajit Khaparde61000862013-10-03 16:16:33 -0500338 } else {
339 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
340
341 return &hw_stats->erx;
Sathya Perlaca34fe32012-11-06 17:48:56 +0000342 }
343}
344
345static void populate_be_v0_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000346{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000347 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
348 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
349 struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000350 struct be_port_rxf_stats_v0 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000351 &rxf_stats->port[adapter->port_num];
352 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000353
Sathya Perlaac124ff2011-07-25 19:10:14 +0000354 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000355 drvs->rx_pause_frames = port_stats->rx_pause_frames;
356 drvs->rx_crc_errors = port_stats->rx_crc_errors;
357 drvs->rx_control_frames = port_stats->rx_control_frames;
358 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
359 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
360 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
361 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
362 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
363 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
364 drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
365 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
366 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
367 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
368 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000369 drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000370 drvs->rx_dropped_header_too_small =
371 port_stats->rx_dropped_header_too_small;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000372 drvs->rx_address_filtered =
373 port_stats->rx_address_filtered +
374 port_stats->rx_vlan_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000375 drvs->rx_alignment_symbol_errors =
376 port_stats->rx_alignment_symbol_errors;
377
378 drvs->tx_pauseframes = port_stats->tx_pauseframes;
379 drvs->tx_controlframes = port_stats->tx_controlframes;
380
381 if (adapter->port_num)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000382 drvs->jabber_events = rxf_stats->port1_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000383 else
Sathya Perlaac124ff2011-07-25 19:10:14 +0000384 drvs->jabber_events = rxf_stats->port0_jabber_events;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000385 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000386 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000387 drvs->forwarded_packets = rxf_stats->forwarded_packets;
388 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000389 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
390 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000391 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
392}
393
Sathya Perlaca34fe32012-11-06 17:48:56 +0000394static void populate_be_v1_stats(struct be_adapter *adapter)
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000395{
Sathya Perlaac124ff2011-07-25 19:10:14 +0000396 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
397 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
398 struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000399 struct be_port_rxf_stats_v1 *port_stats =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000400 &rxf_stats->port[adapter->port_num];
401 struct be_drv_stats *drvs = &adapter->drv_stats;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000402
Sathya Perlaac124ff2011-07-25 19:10:14 +0000403 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
Ajit Khaparde02fe7022011-12-09 13:53:09 +0000404 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
405 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000406 drvs->rx_pause_frames = port_stats->rx_pause_frames;
407 drvs->rx_crc_errors = port_stats->rx_crc_errors;
408 drvs->rx_control_frames = port_stats->rx_control_frames;
409 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
410 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
411 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
412 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
413 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
414 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
415 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
416 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
417 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
418 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
419 drvs->rx_dropped_header_too_small =
420 port_stats->rx_dropped_header_too_small;
421 drvs->rx_input_fifo_overflow_drop =
422 port_stats->rx_input_fifo_overflow_drop;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000423 drvs->rx_address_filtered = port_stats->rx_address_filtered;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000424 drvs->rx_alignment_symbol_errors =
425 port_stats->rx_alignment_symbol_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000426 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000427 drvs->tx_pauseframes = port_stats->tx_pauseframes;
428 drvs->tx_controlframes = port_stats->tx_controlframes;
Ajit Khapardeb5adffc42013-05-01 09:38:00 +0000429 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000430 drvs->jabber_events = port_stats->jabber_events;
431 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000432 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000433 drvs->forwarded_packets = rxf_stats->forwarded_packets;
434 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000435 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
436 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000437 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
438}
439
Ajit Khaparde61000862013-10-03 16:16:33 -0500440static void populate_be_v2_stats(struct be_adapter *adapter)
441{
442 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
443 struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
444 struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
445 struct be_port_rxf_stats_v2 *port_stats =
446 &rxf_stats->port[adapter->port_num];
447 struct be_drv_stats *drvs = &adapter->drv_stats;
448
449 be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
450 drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
451 drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
452 drvs->rx_pause_frames = port_stats->rx_pause_frames;
453 drvs->rx_crc_errors = port_stats->rx_crc_errors;
454 drvs->rx_control_frames = port_stats->rx_control_frames;
455 drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
456 drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
457 drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
458 drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
459 drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
460 drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
461 drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
462 drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
463 drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
464 drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
465 drvs->rx_dropped_header_too_small =
466 port_stats->rx_dropped_header_too_small;
467 drvs->rx_input_fifo_overflow_drop =
468 port_stats->rx_input_fifo_overflow_drop;
469 drvs->rx_address_filtered = port_stats->rx_address_filtered;
470 drvs->rx_alignment_symbol_errors =
471 port_stats->rx_alignment_symbol_errors;
472 drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
473 drvs->tx_pauseframes = port_stats->tx_pauseframes;
474 drvs->tx_controlframes = port_stats->tx_controlframes;
475 drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
476 drvs->jabber_events = port_stats->jabber_events;
477 drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
478 drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
479 drvs->forwarded_packets = rxf_stats->forwarded_packets;
480 drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
481 drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
482 drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
483 adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
Sathya Perla748b5392014-05-09 13:29:13 +0530484 if (be_roce_supported(adapter)) {
Ajit Khaparde461ae372013-10-03 16:16:50 -0500485 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
486 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
487 drvs->rx_roce_frames = port_stats->roce_frames_received;
488 drvs->roce_drops_crc = port_stats->roce_drops_crc;
489 drvs->roce_drops_payload_len =
490 port_stats->roce_drops_payload_len;
491 }
Ajit Khaparde61000862013-10-03 16:16:33 -0500492}
493
Selvin Xavier005d5692011-05-16 07:36:35 +0000494static void populate_lancer_stats(struct be_adapter *adapter)
495{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000496
Selvin Xavier005d5692011-05-16 07:36:35 +0000497 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla748b5392014-05-09 13:29:13 +0530498 struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000499
500 be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
501 drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
502 drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
503 drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000504 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000505 drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000506 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
507 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
508 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
509 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
510 drvs->rx_dropped_tcp_length =
511 pport_stats->rx_dropped_invalid_tcp_length;
512 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
513 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
514 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
515 drvs->rx_dropped_header_too_small =
516 pport_stats->rx_dropped_header_too_small;
517 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Suresh Reddy18fb06a2013-04-25 23:03:21 +0000518 drvs->rx_address_filtered =
519 pport_stats->rx_address_filtered +
520 pport_stats->rx_vlan_filtered;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000521 drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000522 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000523 drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
524 drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000525 drvs->jabber_events = pport_stats->rx_jabbers;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000526 drvs->forwarded_packets = pport_stats->num_forwards_lo;
527 drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000528 drvs->rx_drops_too_many_frags =
Sathya Perlaac124ff2011-07-25 19:10:14 +0000529 pport_stats->rx_drops_too_many_frags_lo;
Selvin Xavier005d5692011-05-16 07:36:35 +0000530}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000531
Sathya Perla09c1c682011-08-22 19:41:53 +0000532static void accumulate_16bit_val(u32 *acc, u16 val)
533{
534#define lo(x) (x & 0xFFFF)
535#define hi(x) (x & 0xFFFF0000)
536 bool wrapped = val < lo(*acc);
537 u32 newacc = hi(*acc) + val;
538
539 if (wrapped)
540 newacc += 65536;
541 ACCESS_ONCE(*acc) = newacc;
542}
543
Jingoo Han4188e7d2013-08-05 18:02:02 +0900544static void populate_erx_stats(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530545 struct be_rx_obj *rxo, u32 erx_stat)
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000546{
547 if (!BEx_chip(adapter))
548 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
549 else
550 /* below erx HW counter can actually wrap around after
551 * 65535. Driver accumulates a 32-bit value
552 */
553 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
554 (u16)erx_stat);
555}
556
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000557void be_parse_stats(struct be_adapter *adapter)
558{
Ajit Khaparde61000862013-10-03 16:16:33 -0500559 struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000560 struct be_rx_obj *rxo;
561 int i;
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000562 u32 erx_stat;
Sathya Perlaac124ff2011-07-25 19:10:14 +0000563
Sathya Perlaca34fe32012-11-06 17:48:56 +0000564 if (lancer_chip(adapter)) {
565 populate_lancer_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000566 } else {
Sathya Perlaca34fe32012-11-06 17:48:56 +0000567 if (BE2_chip(adapter))
568 populate_be_v0_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500569 else if (BE3_chip(adapter))
570 /* for BE3 */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000571 populate_be_v1_stats(adapter);
Ajit Khaparde61000862013-10-03 16:16:33 -0500572 else
573 populate_be_v2_stats(adapter);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000574
Ajit Khaparde61000862013-10-03 16:16:33 -0500575 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
Sathya Perlaca34fe32012-11-06 17:48:56 +0000576 for_all_rx_queues(adapter, rxo, i) {
Ajit Khapardea6c578e2013-05-01 09:37:27 +0000577 erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
578 populate_erx_stats(adapter, rxo, erx_stat);
Sathya Perlaca34fe32012-11-06 17:48:56 +0000579 }
Sathya Perla09c1c682011-08-22 19:41:53 +0000580 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000581}
582
Sathya Perlaab1594e2011-07-25 19:10:15 +0000583static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
Sathya Perla748b5392014-05-09 13:29:13 +0530584 struct rtnl_link_stats64 *stats)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700585{
Sathya Perlaab1594e2011-07-25 19:10:15 +0000586 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000587 struct be_drv_stats *drvs = &adapter->drv_stats;
Sathya Perla3abcded2010-10-03 22:12:27 -0700588 struct be_rx_obj *rxo;
Sathya Perla3c8def92011-06-12 20:01:58 +0000589 struct be_tx_obj *txo;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000590 u64 pkts, bytes;
591 unsigned int start;
Sathya Perla3abcded2010-10-03 22:12:27 -0700592 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700593
Sathya Perla3abcded2010-10-03 22:12:27 -0700594 for_all_rx_queues(adapter, rxo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000595 const struct be_rx_stats *rx_stats = rx_stats(rxo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530596
Sathya Perlaab1594e2011-07-25 19:10:15 +0000597 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700598 start = u64_stats_fetch_begin_irq(&rx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000599 pkts = rx_stats(rxo)->rx_pkts;
600 bytes = rx_stats(rxo)->rx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700601 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000602 stats->rx_packets += pkts;
603 stats->rx_bytes += bytes;
604 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
605 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
606 rx_stats(rxo)->rx_drops_no_frags;
Sathya Perla3abcded2010-10-03 22:12:27 -0700607 }
608
Sathya Perla3c8def92011-06-12 20:01:58 +0000609 for_all_tx_queues(adapter, txo, i) {
Sathya Perlaab1594e2011-07-25 19:10:15 +0000610 const struct be_tx_stats *tx_stats = tx_stats(txo);
Kalesh AP03d28ff2014-09-19 15:46:56 +0530611
Sathya Perlaab1594e2011-07-25 19:10:15 +0000612 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700613 start = u64_stats_fetch_begin_irq(&tx_stats->sync);
Sathya Perlaab1594e2011-07-25 19:10:15 +0000614 pkts = tx_stats(txo)->tx_pkts;
615 bytes = tx_stats(txo)->tx_bytes;
Eric W. Biederman57a77442014-03-13 21:26:42 -0700616 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
Sathya Perlaab1594e2011-07-25 19:10:15 +0000617 stats->tx_packets += pkts;
618 stats->tx_bytes += bytes;
Sathya Perla3c8def92011-06-12 20:01:58 +0000619 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700620
621 /* bad pkts received */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000622 stats->rx_errors = drvs->rx_crc_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000623 drvs->rx_alignment_symbol_errors +
624 drvs->rx_in_range_errors +
625 drvs->rx_out_range_errors +
626 drvs->rx_frame_too_long +
627 drvs->rx_dropped_too_small +
628 drvs->rx_dropped_too_short +
629 drvs->rx_dropped_header_too_small +
630 drvs->rx_dropped_tcp_length +
Sathya Perlaab1594e2011-07-25 19:10:15 +0000631 drvs->rx_dropped_runt;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700632
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700633 /* detailed rx errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000634 stats->rx_length_errors = drvs->rx_in_range_errors +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000635 drvs->rx_out_range_errors +
636 drvs->rx_frame_too_long;
Sathya Perla68110862009-06-10 02:21:16 +0000637
Sathya Perlaab1594e2011-07-25 19:10:15 +0000638 stats->rx_crc_errors = drvs->rx_crc_errors;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700639
640 /* frame alignment errors */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000641 stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
Sathya Perla68110862009-06-10 02:21:16 +0000642
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700643 /* receiver fifo overrun */
644 /* drops_no_pbuf is no per i/f, it's per BE card */
Sathya Perlaab1594e2011-07-25 19:10:15 +0000645 stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000646 drvs->rx_input_fifo_overflow_drop +
647 drvs->rx_drops_no_pbuf;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000648 return stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700649}
650
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000651void be_link_status_update(struct be_adapter *adapter, u8 link_status)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700652{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700653 struct net_device *netdev = adapter->netdev;
654
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000655 if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
Sathya Perlaea172a02011-08-02 19:57:42 +0000656 netif_carrier_off(netdev);
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000657 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700658 }
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000659
Suresh Reddybdce2ad2014-03-11 18:53:04 +0530660 if (link_status)
Ajit Khapardeb236916a2011-12-30 12:15:40 +0000661 netif_carrier_on(netdev);
662 else
663 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700664}
665
Sathya Perla3c8def92011-06-12 20:01:58 +0000666static void be_tx_stats_update(struct be_tx_obj *txo,
Sathya Perla748b5392014-05-09 13:29:13 +0530667 u32 wrb_cnt, u32 copied, u32 gso_segs,
668 bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700669{
Sathya Perla3c8def92011-06-12 20:01:58 +0000670 struct be_tx_stats *stats = tx_stats(txo);
671
Sathya Perlaab1594e2011-07-25 19:10:15 +0000672 u64_stats_update_begin(&stats->sync);
Sathya Perlaac124ff2011-07-25 19:10:14 +0000673 stats->tx_reqs++;
674 stats->tx_wrbs += wrb_cnt;
675 stats->tx_bytes += copied;
676 stats->tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700677 if (stopped)
Sathya Perlaac124ff2011-07-25 19:10:14 +0000678 stats->tx_stops++;
Sathya Perlaab1594e2011-07-25 19:10:15 +0000679 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700680}
681
682/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000683static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
Sathya Perla748b5392014-05-09 13:29:13 +0530684 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700685{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700686 int cnt = (skb->len > skb->data_len);
687
688 cnt += skb_shinfo(skb)->nr_frags;
689
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700690 /* to account for hdr wrb */
691 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000692 if (lancer_chip(adapter) || !(cnt & 1)) {
693 *dummy = false;
694 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700695 /* add a dummy to make it an even num */
696 cnt++;
697 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000698 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700699 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
700 return cnt;
701}
702
703static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
704{
705 wrb->frag_pa_hi = upper_32_bits(addr);
706 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
707 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
Somnath Kotur89b1f492012-06-24 19:40:55 +0000708 wrb->rsvd0 = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700709}
710
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000711static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +0530712 struct sk_buff *skb)
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000713{
714 u8 vlan_prio;
715 u16 vlan_tag;
716
717 vlan_tag = vlan_tx_tag_get(skb);
718 vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
719 /* If vlan priority provided by OS is NOT in available bmap */
720 if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
721 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
722 adapter->recommended_prio;
723
724 return vlan_tag;
725}
726
Sathya Perlac9c47142014-03-27 10:46:19 +0530727/* Used only for IP tunnel packets */
728static u16 skb_inner_ip_proto(struct sk_buff *skb)
729{
730 return (inner_ip_hdr(skb)->version == 4) ?
731 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
732}
733
734static u16 skb_ip_proto(struct sk_buff *skb)
735{
736 return (ip_hdr(skb)->version == 4) ?
737 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
738}
739
Somnath Koturcc4ce022010-10-21 07:11:14 -0700740static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
Sathya Perla748b5392014-05-09 13:29:13 +0530741 struct sk_buff *skb, u32 wrb_cnt, u32 len,
742 bool skip_hw_vlan)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700743{
Sathya Perlac9c47142014-03-27 10:46:19 +0530744 u16 vlan_tag, proto;
Somnath Koturcc4ce022010-10-21 07:11:14 -0700745
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700746 memset(hdr, 0, sizeof(*hdr));
747
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530748 SET_TX_WRB_HDR_BITS(crc, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700749
Ajit Khaparde49e4b8472010-06-14 04:56:07 +0000750 if (skb_is_gso(skb)) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530751 SET_TX_WRB_HDR_BITS(lso, hdr, 1);
752 SET_TX_WRB_HDR_BITS(lso_mss, hdr, skb_shinfo(skb)->gso_size);
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000753 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530754 SET_TX_WRB_HDR_BITS(lso6, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700755 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
Sathya Perlac9c47142014-03-27 10:46:19 +0530756 if (skb->encapsulation) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530757 SET_TX_WRB_HDR_BITS(ipcs, hdr, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530758 proto = skb_inner_ip_proto(skb);
759 } else {
760 proto = skb_ip_proto(skb);
761 }
762 if (proto == IPPROTO_TCP)
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530763 SET_TX_WRB_HDR_BITS(tcpcs, hdr, 1);
Sathya Perlac9c47142014-03-27 10:46:19 +0530764 else if (proto == IPPROTO_UDP)
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530765 SET_TX_WRB_HDR_BITS(udpcs, hdr, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700766 }
767
Ajit Khaparde4c5102f2011-07-12 22:10:01 -0700768 if (vlan_tx_tag_present(skb)) {
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530769 SET_TX_WRB_HDR_BITS(vlan, hdr, 1);
Ajit Khaparde1ded1322011-12-09 13:53:17 +0000770 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530771 SET_TX_WRB_HDR_BITS(vlan_tag, hdr, vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700772 }
773
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000774 /* To skip HW VLAN tagging: evt = 1, compl = 0 */
Sathya Perlac3c18bc2014-09-02 09:56:47 +0530775 SET_TX_WRB_HDR_BITS(complete, hdr, !skip_hw_vlan);
776 SET_TX_WRB_HDR_BITS(event, hdr, 1);
777 SET_TX_WRB_HDR_BITS(num_wrb, hdr, wrb_cnt);
778 SET_TX_WRB_HDR_BITS(len, hdr, len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700779}
780
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000781static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla748b5392014-05-09 13:29:13 +0530782 bool unmap_single)
Sathya Perla7101e112010-03-22 20:41:12 +0000783{
784 dma_addr_t dma;
785
786 be_dws_le_to_cpu(wrb, sizeof(*wrb));
787
788 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000789 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000790 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000791 dma_unmap_single(dev, dma, wrb->frag_len,
792 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000793 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000794 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000795 }
796}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700797
/* DMA-map the skb's linear part and page frags and post one WRB per
 * mapped piece (plus an optional dummy WRB and the leading header WRB)
 * into txq.  Returns the number of data bytes mapped, or 0 on a DMA
 * mapping failure, in which case all mappings done so far are undone
 * and txq->head is restored.
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
			struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
			bool skip_hw_vlan)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;	/* true once the linear part is mapped */
	u16 map_head;			/* first data-WRB slot, for unwinding */

	/* reserve the first slot for the header WRB; it is filled last,
	 * after the total copied length is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	/* linear (headlen) portion, if non-empty */
	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);

		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	/* one WRB per page fragment */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

		busaddr = skb_frag_dma_map(dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, skb_frag_size(frag));
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += skb_frag_size(frag);
	}

	/* zero-length dummy WRB to even out the WRB count (see
	 * wrb_cnt_for_skb())
	 */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* rewind to the first data WRB and unmap everything posted so far;
	 * only the very first WRB may have been a dma_map_single() mapping
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		adapter->drv_stats.dma_map_errors++;
		queue_head_inc(txq);
	}
	return 0;
}
866
/* Insert the VLAN tag (and the outer qnq VLAN, if configured) into the
 * packet data itself instead of relying on HW tagging.  Returns the
 * (possibly reallocated) skb, or NULL if unsharing/tag insertion failed
 * (__vlan_put_tag frees the skb on failure).  May set *skip_hw_vlan to
 * tell the caller to ask the F/W to skip HW VLAN insertion.
 */
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
					     struct sk_buff *skb,
					     bool *skip_hw_vlan)
{
	u16 vlan_tag = 0;

	/* get a private copy if the skb is shared, since it is modified */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return skb;

	if (vlan_tx_tag_present(skb))
		vlan_tag = be_get_tx_vlan_tag(adapter, skb);

	/* in qnq mode, untagged pkts get the port's pvid as inner tag */
	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
		if (!vlan_tag)
			vlan_tag = adapter->pvid;
		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
		 * skip VLAN insertion
		 */
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	if (vlan_tag) {
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		/* tag now lives in the payload; clear the out-of-band tci */
		skb->vlan_tci = 0;
	}

	/* Insert the outer VLAN, if any */
	if (adapter->qnq_vid) {
		vlan_tag = adapter->qnq_vid;
		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
		if (unlikely(!skb))
			return skb;
		if (skip_hw_vlan)
			*skip_hw_vlan = true;
	}

	return skb;
}
909
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000910static bool be_ipv6_exthdr_check(struct sk_buff *skb)
911{
912 struct ethhdr *eh = (struct ethhdr *)skb->data;
913 u16 offset = ETH_HLEN;
914
915 if (eh->h_proto == htons(ETH_P_IPV6)) {
916 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
917
918 offset += sizeof(struct ipv6hdr);
919 if (ip6h->nexthdr != NEXTHDR_TCP &&
920 ip6h->nexthdr != NEXTHDR_UDP) {
921 struct ipv6_opt_hdr *ehdr =
922 (struct ipv6_opt_hdr *) (skb->data + offset);
923
924 /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
925 if (ehdr->hdrlen == 0xff)
926 return true;
927 }
928 }
929 return false;
930}
931
932static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
933{
934 return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
935}
936
Sathya Perla748b5392014-05-09 13:29:13 +0530937static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000938{
Sathya Perlaee9c7992013-05-22 23:04:55 +0000939 return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
Ajit Khapardebc0c3402013-04-24 11:52:50 +0000940}
941
/* Apply BEx/Lancer TX HW-bug workarounds: trim HW-padded short IPv4
 * pkts, decide when HW VLAN tagging must be skipped, and insert VLAN
 * tags in SW where HW tagging would corrupt or stall the pkt.
 * Returns the (possibly reallocated) skb, or NULL if the pkt was
 * dropped or tag insertion failed.
 */
static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
						  struct sk_buff *skb,
						  bool *skip_hw_vlan)
{
	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
	unsigned int eth_hdr_len;
	struct iphdr *ip;

	/* For padded packets, BE HW modifies tot_len field in IP header
	 * incorrectly when VLAN tag is inserted by HW.
	 * For padded packets, Lancer computes incorrect checksum.
	 * Work around both by trimming the pad off such short IPv4 pkts.
	 */
	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
						VLAN_ETH_HLEN : ETH_HLEN;
	if (skb->len <= 60 &&
	    (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
	    is_ipv4_pkt(skb)) {
		ip = (struct iphdr *)ip_hdr(skb);
		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
	}

	/* If vlan tag is already inlined in the packet, skip HW VLAN
	 * tagging in pvid-tagging mode
	 */
	if (be_pvid_tagging_enabled(adapter) &&
	    veh->h_vlan_proto == htons(ETH_P_8021Q))
		*skip_hw_vlan = true;

	/* HW has a bug wherein it will calculate CSUM for VLAN
	 * pkts even though it is disabled.
	 * Manually insert VLAN in pkt.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL &&
	    vlan_tx_tag_present(skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	/* HW may lockup when VLAN HW tagging is requested on
	 * certain ipv6 packets. Drop such pkts if the HW workaround to
	 * skip HW tagging is not enabled by FW.
	 */
	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
		     (adapter->pvid || adapter->qnq_vid) &&
		     !qnq_async_evt_rcvd(adapter)))
		goto tx_drop;

	/* Manual VLAN tag insertion to prevent:
	 * ASIC lockup when the ASIC inserts VLAN tag into
	 * certain ipv6 packets. Insert VLAN tags in driver,
	 * and set event, completion, vlan bits accordingly
	 * in the Tx WRB.
	 */
	if (be_ipv6_tx_stall_chk(adapter, skb) &&
	    be_vlan_tag_tx_chk(adapter, skb)) {
		skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
		if (unlikely(!skb))
			goto err;
	}

	return skb;
tx_drop:
	/* the pkt is dropped here; NULL tells the caller to account it */
	dev_kfree_skb_any(skb);
err:
	return NULL;
}
1009
Vasundhara Volamec495fa2014-03-03 14:25:38 +05301010static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1011 struct sk_buff *skb,
1012 bool *skip_hw_vlan)
1013{
1014 /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1015 * less may cause a transmit stall on that port. So the work-around is
1016 * to pad short packets (<= 32 bytes) to a 36-byte length.
1017 */
1018 if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1019 if (skb_padto(skb, 36))
1020 return NULL;
1021 skb->len = 36;
1022 }
1023
1024 if (BEx_chip(adapter) || lancer_chip(adapter)) {
1025 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1026 if (!skb)
1027 return NULL;
1028 }
1029
1030 return skb;
1031}
1032
/* ndo_start_xmit handler: apply HW workarounds, build the WRBs for the
 * skb on its TX queue and ring the doorbell.  Always returns
 * NETDEV_TX_OK; pkts that cannot be sent are dropped and counted in
 * tx_drv_drops.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	bool dummy_wrb, stopped = false;
	u32 wrb_cnt = 0, copied = 0;
	bool skip_hw_vlan = false;
	u32 start = txq->head;	/* slot of the header WRB / sent-skb entry */

	skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
	if (!skb) {
		/* workaround consumed/dropped the pkt */
		tx_stats(txo)->tx_drv_drops++;
		return NETDEV_TX_OK;
	}

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
			      skip_hw_vlan);
	if (copied) {
		int gso_segs = skb_shinfo(skb)->gso_segs;

		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialze the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txo, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
	} else {
		/* DMA mapping failed: make_tx_wrbs already unwound the WRBs;
		 * restore the queue head and drop the pkt
		 */
		txq->head = start;
		tx_stats(txo)->tx_drv_drops++;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
1081
1082static int be_change_mtu(struct net_device *netdev, int new_mtu)
1083{
1084 struct be_adapter *adapter = netdev_priv(netdev);
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301085 struct device *dev = &adapter->pdev->dev;
1086
1087 if (new_mtu < BE_MIN_MTU || new_mtu > BE_MAX_MTU) {
1088 dev_info(dev, "MTU must be between %d and %d bytes\n",
1089 BE_MIN_MTU, BE_MAX_MTU);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001090 return -EINVAL;
1091 }
Kalesh AP0d3f5cc2014-09-02 09:56:53 +05301092
1093 dev_info(dev, "MTU changed from %d to %d bytes\n",
Sathya Perla748b5392014-05-09 13:29:13 +05301094 netdev->mtu, new_mtu);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001095 netdev->mtu = new_mtu;
1096 return 0;
1097}
1098
1099/*
Ajit Khaparde82903e42010-02-09 01:34:57 +00001100 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1101 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001102 */
/* Program the HW VLAN filter table from adapter->vids.  Falls back to
 * VLAN-promiscuous mode when more vids are configured than the HW
 * supports or when the F/W reports insufficient resources; conversely,
 * re-disables VLAN-promiscuous mode once normal filtering succeeds.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u16 vids[BE_NUM_VLANS_SUPPORTED];
	u16 num = 0, i = 0;
	int status = 0;

	/* No need to further configure vids if in promiscuous mode */
	if (adapter->promiscuous)
		return 0;

	if (adapter->vlans_added > be_max_vlans(adapter))
		goto set_vlan_promisc;

	/* Construct VLAN Table to give to HW */
	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
		vids[num++] = cpu_to_le16(i);

	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
	if (status) {
		/* Set to VLAN promisc mode as setting VLAN filter failed */
		if (addl_status(status) ==
		    MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
			goto set_vlan_promisc;
		dev_err(dev, "Setting HW VLAN filtering failed\n");
	} else {
		if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
			/* hw VLAN filtering re-enabled. */
			status = be_cmd_rx_filter(adapter,
						  BE_FLAGS_VLAN_PROMISC, OFF);
			if (!status) {
				dev_info(dev,
					 "Disabling VLAN Promiscuous mode\n");
				adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
			}
		}
	}

	return status;

set_vlan_promisc:
	/* already in VLAN promisc mode: nothing more to do */
	if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
		return 0;

	status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
	if (!status) {
		dev_info(dev, "Enable VLAN Promiscuous mode\n");
		adapter->flags |= BE_FLAGS_VLAN_PROMISC;
	} else
		dev_err(dev, "Failed to enable VLAN Promiscuous mode\n");
	return status;
}
1155
Patrick McHardy80d5c362013-04-19 02:04:28 +00001156static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001157{
1158 struct be_adapter *adapter = netdev_priv(netdev);
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001159 int status = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001160
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001161 /* Packets with VID 0 are always received by Lancer by default */
1162 if (lancer_chip(adapter) && vid == 0)
Vasundhara Volam48291c22014-03-11 18:53:08 +05301163 return status;
1164
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301165 if (test_bit(vid, adapter->vids))
Vasundhara Volam48291c22014-03-11 18:53:08 +05301166 return status;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001167
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301168 set_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301169 adapter->vlans_added++;
Jiri Pirko8e586132011-12-08 19:52:37 -05001170
Somnath Kotura6b74e02014-01-21 15:50:55 +05301171 status = be_vid_config(adapter);
1172 if (status) {
1173 adapter->vlans_added--;
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301174 clear_bit(vid, adapter->vids);
Somnath Kotura6b74e02014-01-21 15:50:55 +05301175 }
Vasundhara Volam48291c22014-03-11 18:53:08 +05301176
Ajit Khaparde80817cb2011-12-30 12:15:12 +00001177 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001178}
1179
Patrick McHardy80d5c362013-04-19 02:04:28 +00001180static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001181{
1182 struct be_adapter *adapter = netdev_priv(netdev);
1183
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001184 /* Packets with VID 0 are always received by Lancer by default */
1185 if (lancer_chip(adapter) && vid == 0)
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301186 return 0;
Padmanabh Ratnakara85e9982012-10-20 06:02:40 +00001187
Ravikumar Nelavellif6cbd362014-05-09 13:29:16 +05301188 clear_bit(vid, adapter->vids);
Kalesh AP9d4dfe42014-06-30 13:01:33 +05301189 adapter->vlans_added--;
1190
1191 return be_vid_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001192}
1193
Somnath kotur7ad09452014-03-03 14:24:43 +05301194static void be_clear_promisc(struct be_adapter *adapter)
1195{
1196 adapter->promiscuous = false;
Kalesh APa0794882014-05-30 19:06:23 +05301197 adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
Somnath kotur7ad09452014-03-03 14:24:43 +05301198
1199 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1200}
1201
/* ndo_set_rx_mode handler: sync the HW RX filters (promisc, UC MAC
 * list, MC list) with the net device's flags and address lists, falling
 * back to MCAST- or full-promiscuous mode when limits are exceeded or a
 * filter command fails.
 */
static void be_set_rx_mode(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		be_clear_promisc(adapter);
		/* restore the VLAN filter table that promisc bypassed */
		if (adapter->vlans_added)
			be_vid_config(adapter);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > be_max_mc(adapter))
		goto set_mcast_promisc;

	/* re-sync the UC MAC list: delete all secondary MACs and re-add
	 * the current ones (slot 0 always holds the primary MAC)
	 */
	if (netdev_uc_count(netdev) != adapter->uc_macs) {
		struct netdev_hw_addr *ha;
		int i = 1; /* First slot is claimed by the Primary MAC */

		for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
			be_cmd_pmac_del(adapter, adapter->if_handle,
					adapter->pmac_id[i], 0);
		}

		/* too many UC MACs to filter: go fully promiscuous */
		if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
			be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
			adapter->promiscuous = true;
			goto done;
		}

		netdev_for_each_uc_addr(ha, adapter->netdev) {
			adapter->uc_macs++; /* First slot is for Primary MAC */
			be_cmd_pmac_add(adapter, (u8 *)ha->addr,
					adapter->if_handle,
					&adapter->pmac_id[adapter->uc_macs], 0);
		}
	}

	status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
	if (!status) {
		/* MC filtering works again: drop the promisc fallback flag */
		if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
			adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
		goto done;
	}

set_mcast_promisc:
	if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
		return;

	/* Set to MCAST promisc mode if setting MULTICAST address fails
	 * or if num configured exceeds what we support
	 */
	status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
	if (!status)
		adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
	return;
}
1268
/* ndo_set_vf_mac handler: assign a new MAC address to the given VF.
 * On BEx chips the old pmac entry is deleted before the new one is added;
 * other chips change the MAC with a single FW command.
 * Returns 0 on success (or no-op), -EPERM if SR-IOV is not enabled,
 * -EINVAL for a bad MAC/VF index, or a translated FW error code.
 */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
		return -EINVAL;

	/* Proceed further only if user provided MAC is different
	 * from active MAC
	 */
	if (ether_addr_equal(mac, vf_cfg->mac_addr))
		return 0;

	if (BEx_chip(adapter)) {
		/* del must precede add: only one pmac slot per VF here */
		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
				vf + 1);

		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
					 &vf_cfg->pmac_id, vf + 1);
	} else {
		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
					vf + 1);
	}

	if (status) {
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
			mac, vf, status);
		return be_cmd_status(status);
	}

	/* Cache the new MAC so be_get_vf_config() reports it */
	ether_addr_copy(vf_cfg->mac_addr, mac);

	return 0;
}
1308
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001309static int be_get_vf_config(struct net_device *netdev, int vf,
Sathya Perla748b5392014-05-09 13:29:13 +05301310 struct ifla_vf_info *vi)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001311{
1312 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla11ac75e2011-12-13 00:58:50 +00001313 struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001314
Sathya Perla11ac75e2011-12-13 00:58:50 +00001315 if (!sriov_enabled(adapter))
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001316 return -EPERM;
1317
Sathya Perla11ac75e2011-12-13 00:58:50 +00001318 if (vf >= adapter->num_vfs)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001319 return -EINVAL;
1320
1321 vi->vf = vf;
Sucheta Chakrabortyed616682014-05-22 09:59:05 -04001322 vi->max_tx_rate = vf_cfg->tx_rate;
1323 vi->min_tx_rate = 0;
Ajit Khapardea60b3a12013-09-27 15:18:56 -05001324 vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1325 vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
Sathya Perla11ac75e2011-12-13 00:58:50 +00001326 memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301327 vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
Ajit Khaparde64600ea2010-07-23 01:50:34 +00001328
1329 return 0;
1330}
1331
/* ndo_set_vf_vlan handler: configure transparent ("port") VLAN tagging
 * for a VF via the host-switch table. vlan == 0 and qos == 0 resets
 * transparent tagging for that VF.
 * Returns 0 on success, -EPERM if SR-IOV is off, -EINVAL for bad
 * vf/vlan/qos values, or a translated FW error code.
 */
static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
	int status = 0;

	if (!sriov_enabled(adapter))
		return -EPERM;

	/* VID must fit in 12 bits, priority in 3 bits */
	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan || qos) {
		vlan |= qos << VLAN_PRIO_SHIFT;
		/* skip the FW call if the requested tag is already active */
		if (vf_cfg->vlan_tag != vlan)
			status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
						       vf_cfg->if_handle, 0);
	} else {
		/* Reset Transparent Vlan Tagging. */
		status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
					       vf + 1, vf_cfg->if_handle, 0);
	}

	if (status) {
		dev_err(&adapter->pdev->dev,
			"VLAN %d config on VF %d failed : %#x\n", vlan,
			vf, status);
		return be_cmd_status(status);
	}

	/* cache the tag (0 on reset) so be_get_vf_config() reports it */
	vf_cfg->vlan_tag = vlan;

	return 0;
}
1366
/* ndo_set_vf_rate handler: cap the TX rate of a VF.
 * min_tx_rate is not supported and must be 0. max_tx_rate of 0 removes
 * the cap; otherwise it must lie between 100 Mbps and the current link
 * speed and, on Skyhawk, be a whole multiple of 1% of link speed.
 */
static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
			     int min_tx_rate, int max_tx_rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->pdev->dev;
	int percent_rate, status = 0;
	u16 link_speed = 0;
	u8 link_status;

	if (!sriov_enabled(adapter))
		return -EPERM;

	if (vf >= adapter->num_vfs)
		return -EINVAL;

	if (min_tx_rate)
		return -EINVAL;

	/* rate == 0 removes the cap; no link-speed validation needed
	 * (link_speed stays 0 for the FW command in that case)
	 */
	if (!max_tx_rate)
		goto config_qos;

	status = be_cmd_link_status_query(adapter, &link_speed,
					  &link_status, 0);
	if (status)
		goto err;

	if (!link_status) {
		dev_err(dev, "TX-rate setting not allowed when link is down\n");
		status = -ENETDOWN;
		goto err;
	}

	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
			link_speed);
		status = -EINVAL;
		goto err;
	}

	/* On Skyhawk the QOS setting must be done only as a % value */
	percent_rate = link_speed / 100;
	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
			percent_rate);
		status = -EINVAL;
		goto err;
	}

config_qos:
	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
	if (status)
		goto err;

	/* cache the cap so be_get_vf_config() reports it */
	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
	return 0;

err:
	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
		max_tx_rate, vf);
	return be_cmd_status(status);
}
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301428static int be_set_vf_link_state(struct net_device *netdev, int vf,
1429 int link_state)
1430{
1431 struct be_adapter *adapter = netdev_priv(netdev);
1432 int status;
1433
1434 if (!sriov_enabled(adapter))
1435 return -EPERM;
1436
1437 if (vf >= adapter->num_vfs)
1438 return -EINVAL;
1439
1440 status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
Kalesh APabccf232014-07-17 16:20:24 +05301441 if (status) {
1442 dev_err(&adapter->pdev->dev,
1443 "Link state change on VF %d failed: %#x\n", vf, status);
1444 return be_cmd_status(status);
1445 }
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301446
Kalesh APabccf232014-07-17 16:20:24 +05301447 adapter->vf_cfg[vf].plink_tracking = link_state;
1448
1449 return 0;
Suresh Reddybdce2ad2014-03-11 18:53:04 +05301450}
Ajit Khapardee1d18732010-07-23 01:52:13 +00001451
Sathya Perla2632baf2013-10-01 16:00:00 +05301452static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1453 ulong now)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001454{
Sathya Perla2632baf2013-10-01 16:00:00 +05301455 aic->rx_pkts_prev = rx_pkts;
1456 aic->tx_reqs_prev = tx_pkts;
1457 aic->jiffies = now;
1458}
Sathya Perlaac124ff2011-07-25 19:10:14 +00001459
Sathya Perla2632baf2013-10-01 16:00:00 +05301460static void be_eqd_update(struct be_adapter *adapter)
1461{
1462 struct be_set_eqd set_eqd[MAX_EVT_QS];
1463 int eqd, i, num = 0, start;
1464 struct be_aic_obj *aic;
1465 struct be_eq_obj *eqo;
1466 struct be_rx_obj *rxo;
1467 struct be_tx_obj *txo;
1468 u64 rx_pkts, tx_pkts;
1469 ulong now;
1470 u32 pps, delta;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001471
Sathya Perla2632baf2013-10-01 16:00:00 +05301472 for_all_evt_queues(adapter, eqo, i) {
1473 aic = &adapter->aic_obj[eqo->idx];
1474 if (!aic->enable) {
1475 if (aic->jiffies)
1476 aic->jiffies = 0;
1477 eqd = aic->et_eqd;
1478 goto modify_eqd;
1479 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001480
Sathya Perla2632baf2013-10-01 16:00:00 +05301481 rxo = &adapter->rx_obj[eqo->idx];
1482 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001483 start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301484 rx_pkts = rxo->stats.rx_pkts;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001485 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001486
Sathya Perla2632baf2013-10-01 16:00:00 +05301487 txo = &adapter->tx_obj[eqo->idx];
1488 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -07001489 start = u64_stats_fetch_begin_irq(&txo->stats.sync);
Sathya Perla2632baf2013-10-01 16:00:00 +05301490 tx_pkts = txo->stats.tx_reqs;
Eric W. Biederman57a77442014-03-13 21:26:42 -07001491 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
Sathya Perla4097f662009-03-24 16:40:13 -07001492
Sathya Perla4097f662009-03-24 16:40:13 -07001493
Sathya Perla2632baf2013-10-01 16:00:00 +05301494 /* Skip, if wrapped around or first calculation */
1495 now = jiffies;
1496 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1497 rx_pkts < aic->rx_pkts_prev ||
1498 tx_pkts < aic->tx_reqs_prev) {
1499 be_aic_update(aic, rx_pkts, tx_pkts, now);
1500 continue;
1501 }
Sathya Perlaab1594e2011-07-25 19:10:15 +00001502
Sathya Perla2632baf2013-10-01 16:00:00 +05301503 delta = jiffies_to_msecs(now - aic->jiffies);
1504 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1505 (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1506 eqd = (pps / 15000) << 2;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001507
Sathya Perla2632baf2013-10-01 16:00:00 +05301508 if (eqd < 8)
1509 eqd = 0;
1510 eqd = min_t(u32, eqd, aic->max_eqd);
1511 eqd = max_t(u32, eqd, aic->min_eqd);
1512
1513 be_aic_update(aic, rx_pkts, tx_pkts, now);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001514modify_eqd:
Sathya Perla2632baf2013-10-01 16:00:00 +05301515 if (eqd != aic->prev_eqd) {
1516 set_eqd[num].delay_multiplier = (eqd * 65)/100;
1517 set_eqd[num].eq_id = eqo->q.id;
1518 aic->prev_eqd = eqd;
1519 num++;
1520 }
Sathya Perlaac124ff2011-07-25 19:10:14 +00001521 }
Sathya Perla2632baf2013-10-01 16:00:00 +05301522
1523 if (num)
1524 be_cmd_modify_eqd(adapter, set_eqd, num);
Sathya Perla4097f662009-03-24 16:40:13 -07001525}
1526
Sathya Perla3abcded2010-10-03 22:12:27 -07001527static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla748b5392014-05-09 13:29:13 +05301528 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001529{
Sathya Perlaac124ff2011-07-25 19:10:14 +00001530 struct be_rx_stats *stats = rx_stats(rxo);
Sathya Perla4097f662009-03-24 16:40:13 -07001531
Sathya Perlaab1594e2011-07-25 19:10:15 +00001532 u64_stats_update_begin(&stats->sync);
Sathya Perla3abcded2010-10-03 22:12:27 -07001533 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001534 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001535 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001536 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001537 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001538 if (rxcp->err)
Sathya Perlaac124ff2011-07-25 19:10:14 +00001539 stats->rx_compl_err++;
Sathya Perlaab1594e2011-07-25 19:10:15 +00001540 u64_stats_update_end(&stats->sync);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001541}
1542
Sathya Perla2e588f82011-03-11 02:49:26 +00001543static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001544{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001545 /* L4 checksum is not reliable for non TCP/UDP packets.
Sathya Perlac9c47142014-03-27 10:46:19 +05301546 * Also ignore ipcksm for ipv6 pkts
1547 */
Sathya Perla2e588f82011-03-11 02:49:26 +00001548 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
Sathya Perlac9c47142014-03-27 10:46:19 +05301549 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
Ajit Khaparde728a9972009-04-13 15:41:22 -07001550}
1551
/* Pop the page_info at the RX queue tail for the frag being consumed.
 * A page is DMA-mapped once and shared by several frags: the mapping is
 * torn down only when this is the page's last frag; otherwise the frag
 * is merely synced for CPU access.
 */
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;
	u16 frag_idx = rxq->tail;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_frag) {
		/* last frag carved from this page: unmap the whole page */
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_frag = false;
	} else {
		/* page still has frags in flight: sync just this frag */
		dma_sync_single_for_cpu(&adapter->pdev->dev,
					dma_unmap_addr(rx_page_info, bus),
					rx_frag_size, DMA_FROM_DEVICE);
	}

	/* consume the slot: advance tail and shrink the used count */
	queue_tail_inc(rxq);
	atomic_dec(&rxq->used);
	return rx_page_info;
}
1577
1578/* Throwaway the data in the Rx completion */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001579static void be_rx_compl_discard(struct be_rx_obj *rxo,
1580 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001581{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001582 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001583 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001584
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001585 for (i = 0; i < num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301586 page_info = get_rx_page_info(rxo);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001587 put_page(page_info->page);
1588 memset(page_info, 0, sizeof(*page_info));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001589 }
1590}
1591
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * Tiny packets (<= BE_HDR_LEN) are copied entirely into the skb linear
 * area and their page is released. Larger packets get only the Ethernet
 * header copied; the payload is attached as page frags, with consecutive
 * frags carved from the same physical page coalesced into one frag slot.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
			     struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(rxo);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		memcpy(skb->data, start, curr_frag_len);
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* pull only the Ethernet header into the linear area;
		 * the rest of the first frag becomes skb frag 0
		 */
		hdr_len = ETH_HLEN;
		memcpy(skb->data, start, hdr_len);
		skb_shinfo(skb)->nr_frags = 1;
		skb_frag_set_page(skb, 0, page_info->page);
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
				  curr_frag_len - hdr_len);
		skb->data_len = curr_frag_len - hdr_len;
		skb->truesize += rx_frag_size;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	/* single-frag packet: nothing more to attach */
	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(rxo);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_frag_set_page(skb, j, page_info->page);
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as frag j: drop the extra page ref
			 * taken when this frag was posted
			 */
			put_page(page_info->page);
		}

		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;
		skb->truesize += rx_frag_size;
		remaining -= curr_frag_len;
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1666
/* Process the RX completion indicated by rxcp when GRO is disabled:
 * allocate an skb, fill it from the completion's frags, set checksum /
 * hash / vlan metadata and hand it to the stack.
 */
static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
				struct be_rx_compl_info *rxcp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
	if (unlikely(!skb)) {
		/* no skb: count the drop and release the HW frags */
		rx_stats(rxo)->rx_drops_no_skbs++;
		be_rx_compl_discard(rxo, rxcp);
		return;
	}

	skb_fill_rx_data(rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->protocol = eth_type_trans(skb, netdev);
	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);

	/* for tunneled pkts the HW verdict covers the inner csum too;
	 * surface that to the stack via csum_level
	 */
	skb->csum_level = rxcp->tunneled;
	skb_mark_napi_id(skb, napi);

	if (rxcp->vlanf)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

	netif_receive_skb(skb);
}
1702
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001703/* Process the RX completion indicated by rxcp when GRO is enabled */
Jingoo Han4188e7d2013-08-05 18:02:02 +09001704static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1705 struct napi_struct *napi,
1706 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001707{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001708 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001709 struct be_rx_page_info *page_info;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001710 struct sk_buff *skb = NULL;
Sathya Perla2e588f82011-03-11 02:49:26 +00001711 u16 remaining, curr_frag_len;
1712 u16 i, j;
Ajit Khaparde3968fa12011-02-20 11:41:53 +00001713
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001714 skb = napi_get_frags(napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001715 if (!skb) {
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001716 be_rx_compl_discard(rxo, rxcp);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001717 return;
1718 }
1719
Sathya Perla2e588f82011-03-11 02:49:26 +00001720 remaining = rxcp->pkt_size;
1721 for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
Suresh Reddy0b0ef1d2014-01-15 13:23:38 +05301722 page_info = get_rx_page_info(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001723
1724 curr_frag_len = min(remaining, rx_frag_size);
1725
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001726 /* Coalesce all frags from the same physical page in one slot */
1727 if (i == 0 || page_info->page_offset == 0) {
1728 /* First frag or Fresh page */
1729 j++;
Ian Campbellb061b392011-08-29 23:18:23 +00001730 skb_frag_set_page(skb, j, page_info->page);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001731 skb_shinfo(skb)->frags[j].page_offset =
1732 page_info->page_offset;
Eric Dumazet9e903e02011-10-18 21:00:24 +00001733 skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001734 } else {
1735 put_page(page_info->page);
1736 }
Eric Dumazet9e903e02011-10-18 21:00:24 +00001737 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
Eric Dumazetbdb28a92011-10-13 06:31:02 +00001738 skb->truesize += rx_frag_size;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001739 remaining -= curr_frag_len;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001740 memset(page_info, 0, sizeof(*page_info));
1741 }
Ajit Khapardebd46cb62009-06-26 02:51:07 +00001742 BUG_ON(j > MAX_SKB_FRAGS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001743
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001744 skb_shinfo(skb)->nr_frags = j + 1;
Sathya Perla2e588f82011-03-11 02:49:26 +00001745 skb->len = rxcp->pkt_size;
1746 skb->data_len = rxcp->pkt_size;
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001747 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturaaa6dae2012-05-02 03:40:49 +00001748 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001749 if (adapter->netdev->features & NETIF_F_RXHASH)
Tom Herbertd2464c82013-12-17 23:23:51 -08001750 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
Sathya Perlac9c47142014-03-27 10:46:19 +05301751
Tom Herbertb6c0e892014-08-27 21:27:17 -07001752 skb->csum_level = rxcp->tunneled;
Sathya Perla6384a4d2013-10-25 10:40:16 +05301753 skb_mark_napi_id(skb, napi);
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001754
Jiri Pirko343e43c2011-08-25 02:50:51 +00001755 if (rxcp->vlanf)
Patrick McHardy86a9bad2013-04-19 02:04:30 +00001756 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
Ajit Khaparde4c5102f2011-07-12 22:10:01 -07001757
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001758 napi_gro_frags(napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001759}
1760
/* Decode a v1 (BE3-native) RX completion descriptor into the
 * chip-independent be_rx_compl_info. The qnq/vlan_tag fields are read
 * only when the completion flags a vlan-tagged frame.
 */
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
	rxcp->tunneled =
		GET_RX_COMPL_V1_BITS(tunneled, compl);
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001783
/* Decode a v0 (legacy) RX completion descriptor into the
 * chip-independent be_rx_compl_info. Unlike v1 this layout has an
 * ip_frag bit but no tunneled bit.
 */
static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
				 struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
	}
	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
}
1805
/* Fetch and parse the RX completion at the CQ tail, or return NULL if
 * none is pending. The parsed result lives in rxo->rxcp (one per ring,
 * overwritten on each call) and the HW descriptor's valid dword is
 * cleared so it cannot be consumed twice.
 */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* read the descriptor body only after seeing the valid bit */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(compl, rxcp);
	else
		be_parse_rx_compl_v0(compl, rxcp);

	/* IP fragments carry no trustworthy L4 checksum */
	if (rxcp->ip_frag)
		rxcp->l4_csum = 0;

	if (rxcp->vlanf) {
		/* In QNQ modes, if qnq bit is not set, then the packet was
		 * tagged only with the transparent outer vlan-tag and must
		 * not be treated as a vlan packet by host
		 */
		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* hide the vlan when it is the PVID and not a vid the
		 * host itself configured (transparent-tagging case)
		 */
		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
		    !test_bit(rxcp->vlan_tag, adapter->vids))
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1850
Eric Dumazet1829b082011-03-01 05:48:12 +00001851static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001852{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001853 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001854
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001855 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001856 gfp |= __GFP_COMP;
1857 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001858}
1859
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct device *dev = &adapter->pdev->dev;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0, notify = 0;

	/* Stop when frags_needed are posted or the ring wraps onto a slot
	 * that still holds a page (i.e. the RXQ is full).
	 */
	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a new "big page"; it is carved into
			 * rx_frag_size chunks below.
			 */
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rx_stats(rxo)->rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(dev, pagep, 0,
						    adapter->big_page_size,
						    DMA_FROM_DEVICE);
			if (dma_mapping_error(dev, page_dmaaddr)) {
				put_page(pagep);
				pagep = NULL;
				adapter->drv_stats.dma_map_errors++;
				break;
			}
			page_offset = 0;
		} else {
			/* Next fragment of the current big page; take an
			 * extra page reference for it.
			 */
			get_page(pagep);
			page_offset += rx_frag_size;
		}
		page_info->page_offset = page_offset;
		page_info->page = pagep;

		/* Fill the RX descriptor with the fragment's bus address */
		rxd = queue_head_node(rxq);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
		    adapter->big_page_size) {
			pagep = NULL;
			/* last_frag marks where dma_unmap of the whole big
			 * page must happen on completion.
			 */
			page_info->last_frag = true;
			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		} else {
			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &rxo->page_info_tbl[rxq->head];
	}

	/* Mark the last frag of a page when we break out of the above loop
	 * with no more slots available in the RXQ
	 */
	if (pagep) {
		prev_page_info->last_frag = true;
		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
	}

	if (posted) {
		atomic_add(posted, &rxq->used);
		if (rxo->rx_post_starved)
			rxo->rx_post_starved = false;
		/* Notify HW in chunks; the doorbell count field is limited
		 * to 256 buffers per write.
		 */
		do {
			notify = min(256u, posted);
			be_rxq_notify(adapter, rxq->id, notify);
			posted -= notify;
		} while (posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}
1942
Sathya Perla5fb379e2009-06-18 00:02:59 +00001943static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001944{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1946
1947 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1948 return NULL;
1949
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001950 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001951 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1952
1953 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1954
1955 queue_tail_inc(tx_cq);
1956 return txcp;
1957}
1958
/* Reclaim the wrbs of one transmitted skb: walk the TXQ from its tail up to
 * @last_index, unmapping each fragment's DMA mapping, then free the skb.
 * Returns the number of wrbs consumed (including the header wrb) so the
 * caller can decrement txq->used.
 */
static u16 be_tx_compl_process(struct be_adapter *adapter,
			       struct be_tx_obj *txo, u16 last_index)
{
	struct be_queue_info *txq = &txo->q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = txo->sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* The first data wrb maps the skb's linear header area only
		 * when the skb actually has headlen; subsequent wrbs map
		 * page fragments.
		 */
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	/* consume (not drop): the packet was transmitted successfully */
	dev_consume_skb_any(sent_skb);
	return num_wrbs;
}
1990
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001991/* Return the number of events in the event queue */
1992static inline int events_get(struct be_eq_obj *eqo)
Sathya Perla859b1e42009-08-10 03:43:51 +00001993{
1994 struct be_eq_entry *eqe;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001995 int num = 0;
Sathya Perla859b1e42009-08-10 03:43:51 +00001996
Sathya Perla10ef9ab2012-02-09 18:05:27 +00001997 do {
1998 eqe = queue_tail_node(&eqo->q);
1999 if (eqe->evt == 0)
2000 break;
2001
2002 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00002003 eqe->evt = 0;
2004 num++;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002005 queue_tail_inc(&eqo->q);
2006 } while (true);
Sathya Perla859b1e42009-08-10 03:43:51 +00002007
2008 return num;
2009}
2010
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002011/* Leaves the EQ is disarmed state */
2012static void be_eq_clean(struct be_eq_obj *eqo)
2013{
2014 int num = events_get(eqo);
2015
2016 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2017}
2018
/* Drain and discard everything on an RX queue during teardown: consume all
 * pending completions (waiting for the HW flush completion where required),
 * leave the CQ unarmed, then free every posted-but-unused RX buffer and
 * reset the ring indices.
 */
static void be_rx_cq_clean(struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	struct be_adapter *adapter = rxo->adapter;
	int flush_wait = 0;

	/* Consume pending rx completions.
	 * Wait for the flush completion (identified by zero num_rcvd)
	 * to arrive. Notify CQ even when there are no more CQ entries
	 * for HW to flush partially coalesced CQ entries.
	 * In Lancer, there is no need to wait for flush compl.
	 */
	for (;;) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp) {
			if (lancer_chip(adapter))
				break;

			/* Give up after ~10ms or on a detected HW error */
			if (flush_wait++ > 10 || be_hw_error(adapter)) {
				dev_warn(&adapter->pdev->dev,
					 "did not receive flush compl\n");
				break;
			}
			/* re-arm the CQ to prod HW into flushing */
			be_cq_notify(adapter, rx_cq->id, true, 0);
			mdelay(1);
		} else {
			be_rx_compl_discard(rxo, rxcp);
			be_cq_notify(adapter, rx_cq->id, false, 1);
			/* zero num_rcvd identifies the flush completion */
			if (rxcp->num_rcvd == 0)
				break;
		}
	}

	/* After cleanup, leave the CQ in unarmed state */
	be_cq_notify(adapter, rx_cq->id, false, 0);

	/* Then free posted rx buffers that were not used */
	while (atomic_read(&rxq->used) > 0) {
		page_info = get_rx_page_info(rxo);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
	rxq->tail = rxq->head = 0;
}
2067
/* Drain all TX queues during teardown: keep reaping completions until every
 * TXQ is empty, HW has been silent for ~10ms, or a HW error is detected;
 * then forcibly free any posted skbs whose completions will never arrive.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_tx_obj *txo;
	struct be_queue_info *txq;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff *sent_skb;
	bool dummy_wrb;
	int i, pending_txqs;

	/* Stop polling for compls when HW has been silent for 10ms */
	do {
		pending_txqs = adapter->num_tx_qs;

		for_all_tx_queues(adapter, txo, i) {
			cmpl = 0;
			num_wrbs = 0;
			txq = &txo->q;
			while ((txcp = be_tx_compl_get(&txo->cq))) {
				end_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
				num_wrbs += be_tx_compl_process(adapter, txo,
								end_idx);
				cmpl++;
			}
			if (cmpl) {
				be_cq_notify(adapter, txo->cq.id, false, cmpl);
				atomic_sub(num_wrbs, &txq->used);
				/* progress was made; restart silence timer */
				timeo = 0;
			}
			if (atomic_read(&txq->used) == 0)
				pending_txqs--;
		}

		if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
			break;

		mdelay(1);
	} while (true);

	for_all_tx_queues(adapter, txo, i) {
		txq = &txo->q;
		if (atomic_read(&txq->used))
			dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
				atomic_read(&txq->used));

		/* free posted tx for which compls will never arrive */
		while (atomic_read(&txq->used)) {
			sent_skb = txo->sent_skb_list[txq->tail];
			end_idx = txq->tail;
			/* recompute the wrb span of this skb so
			 * be_tx_compl_process() can walk and unmap it
			 */
			num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
						   &dummy_wrb);
			index_adv(&end_idx, num_wrbs - 1, txq->len);
			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
			atomic_sub(num_wrbs, &txq->used);
		}
	}
}
2125
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002126static void be_evt_queues_destroy(struct be_adapter *adapter)
2127{
2128 struct be_eq_obj *eqo;
2129 int i;
2130
2131 for_all_evt_queues(adapter, eqo, i) {
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002132 if (eqo->q.created) {
2133 be_eq_clean(eqo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002134 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
Sathya Perla6384a4d2013-10-25 10:40:16 +05302135 napi_hash_del(&eqo->napi);
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302136 netif_napi_del(&eqo->napi);
Padmanabh Ratnakar19d59aa2012-07-12 03:57:21 +00002137 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002138 be_queue_free(adapter, &eqo->q);
2139 }
2140}
2141
/* Create one event queue per interrupt vector (capped by the configured
 * queue count), registering a NAPI context and default adaptive-interrupt
 * (AIC) settings for each. Returns 0 on success or a negative error code;
 * partially-created queues are left for the destroy path to clean up.
 */
static int be_evt_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq;
	struct be_eq_obj *eqo;
	struct be_aic_obj *aic;
	int i, rc;

	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
				    adapter->cfg_num_qs);

	for_all_evt_queues(adapter, eqo, i) {
		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
			       BE_NAPI_WEIGHT);
		napi_hash_add(&eqo->napi);
		aic = &adapter->aic_obj[i];
		eqo->adapter = adapter;
		eqo->idx = i;
		/* adaptive interrupt coalescing starts enabled */
		aic->max_eqd = BE_MAX_EQD;
		aic->enable = true;

		eq = &eqo->q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				    sizeof(struct be_eq_entry));
		if (rc)
			return rc;

		rc = be_cmd_eq_create(adapter, eqo);
		if (rc)
			return rc;
	}
	return 0;
}
2174
Sathya Perla5fb379e2009-06-18 00:02:59 +00002175static void be_mcc_queues_destroy(struct be_adapter *adapter)
2176{
2177 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002178
Sathya Perla8788fdc2009-07-27 22:52:03 +00002179 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002180 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002181 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002182 be_queue_free(adapter, q);
2183
Sathya Perla8788fdc2009-07-27 22:52:03 +00002184 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002185 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00002186 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002187 be_queue_free(adapter, q);
2188}
2189
2190/* Must be called only after TX qs are created as MCC shares TX EQ */
2191static int be_mcc_queues_create(struct be_adapter *adapter)
2192{
2193 struct be_queue_info *q, *cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002194
Sathya Perla8788fdc2009-07-27 22:52:03 +00002195 cq = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002196 if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302197 sizeof(struct be_mcc_compl)))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002198 goto err;
2199
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002200 /* Use the default EQ for MCC completions */
2201 if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002202 goto mcc_cq_free;
2203
Sathya Perla8788fdc2009-07-27 22:52:03 +00002204 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00002205 if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2206 goto mcc_cq_destroy;
2207
Sathya Perla8788fdc2009-07-27 22:52:03 +00002208 if (be_cmd_mccq_create(adapter, q, cq))
Sathya Perla5fb379e2009-06-18 00:02:59 +00002209 goto mcc_q_free;
2210
2211 return 0;
2212
2213mcc_q_free:
2214 be_queue_free(adapter, q);
2215mcc_cq_destroy:
Sathya Perla8788fdc2009-07-27 22:52:03 +00002216 be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002217mcc_cq_free:
2218 be_queue_free(adapter, cq);
2219err:
2220 return -1;
2221}
2222
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002223static void be_tx_queues_destroy(struct be_adapter *adapter)
2224{
2225 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00002226 struct be_tx_obj *txo;
2227 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002228
Sathya Perla3c8def92011-06-12 20:01:58 +00002229 for_all_tx_queues(adapter, txo, i) {
2230 q = &txo->q;
2231 if (q->created)
2232 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2233 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002234
Sathya Perla3c8def92011-06-12 20:01:58 +00002235 q = &txo->cq;
2236 if (q->created)
2237 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2238 be_queue_free(adapter, q);
2239 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002240}
2241
Sathya Perla77071332013-08-27 16:57:34 +05302242static int be_tx_qs_create(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002243{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002244 struct be_queue_info *cq, *eq;
Sathya Perla3c8def92011-06-12 20:01:58 +00002245 struct be_tx_obj *txo;
Sathya Perla92bf14a2013-08-27 16:57:32 +05302246 int status, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002247
Sathya Perla92bf14a2013-08-27 16:57:32 +05302248 adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
Sathya Perladafc0fe2011-10-24 02:45:02 +00002249
Sathya Perla3c8def92011-06-12 20:01:58 +00002250 for_all_tx_queues(adapter, txo, i) {
2251 cq = &txo->cq;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002252 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2253 sizeof(struct be_eth_tx_compl));
2254 if (status)
2255 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002256
John Stultz827da442013-10-07 15:51:58 -07002257 u64_stats_init(&txo->stats.sync);
2258 u64_stats_init(&txo->stats.sync_compl);
2259
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002260 /* If num_evt_qs is less than num_tx_qs, then more than
2261 * one txq share an eq
2262 */
2263 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2264 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2265 if (status)
2266 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002267
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002268 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2269 sizeof(struct be_eth_wrb));
2270 if (status)
2271 return status;
2272
Vasundhara Volam94d73aa2013-04-21 23:28:14 +00002273 status = be_cmd_txq_create(adapter, txo);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002274 if (status)
2275 return status;
2276 }
2277
Sathya Perlad3791422012-09-28 04:39:44 +00002278 dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2279 adapter->num_tx_qs);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002280 return 0;
2281}
2282
2283static void be_rx_cqs_destroy(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284{
2285 struct be_queue_info *q;
Sathya Perla3abcded2010-10-03 22:12:27 -07002286 struct be_rx_obj *rxo;
2287 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002288
Sathya Perla3abcded2010-10-03 22:12:27 -07002289 for_all_rx_queues(adapter, rxo, i) {
Sathya Perla3abcded2010-10-03 22:12:27 -07002290 q = &rxo->cq;
2291 if (q->created)
2292 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2293 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002294 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002295}
2296
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002297static int be_rx_cqs_create(struct be_adapter *adapter)
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002298{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002299 struct be_queue_info *eq, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07002300 struct be_rx_obj *rxo;
2301 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302
Sathya Perla92bf14a2013-08-27 16:57:32 +05302303 /* We can create as many RSS rings as there are EQs. */
2304 adapter->num_rx_qs = adapter->num_evt_qs;
2305
2306 /* We'll use RSS only if atleast 2 RSS rings are supported.
2307 * When RSS is used, we'll need a default RXQ for non-IP traffic.
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002308 */
Sathya Perla92bf14a2013-08-27 16:57:32 +05302309 if (adapter->num_rx_qs > 1)
2310 adapter->num_rx_qs++;
2311
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002312 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07002313 for_all_rx_queues(adapter, rxo, i) {
2314 rxo->adapter = adapter;
Sathya Perla3abcded2010-10-03 22:12:27 -07002315 cq = &rxo->cq;
2316 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
Sathya Perla748b5392014-05-09 13:29:13 +05302317 sizeof(struct be_eth_rx_compl));
Sathya Perla3abcded2010-10-03 22:12:27 -07002318 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002319 return rc;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002320
John Stultz827da442013-10-07 15:51:58 -07002321 u64_stats_init(&rxo->stats.sync);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002322 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2323 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
Sathya Perla3abcded2010-10-03 22:12:27 -07002324 if (rc)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002325 return rc;
Sathya Perla3abcded2010-10-03 22:12:27 -07002326 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002327
Sathya Perlad3791422012-09-28 04:39:44 +00002328 dev_info(&adapter->pdev->dev,
2329 "created %d RSS queue(s) and 1 default RX queue\n",
2330 adapter->num_rx_qs - 1);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002331 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00002332}
2333
/* Legacy INTx interrupt handler: schedule NAPI (if not already scheduled),
 * count the pending events and ack them to HW, and report spurious
 * interrupts carefully so the kernel does not disable the IRQ line.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_eq_obj *eqo = dev;
	struct be_adapter *adapter = eqo->adapter;
	int num_evts = 0;

	/* IRQ is not expected when NAPI is scheduled as the EQ
	 * will not be armed.
	 * But, this can happen on Lancer INTx where it takes
	 * a while to de-assert INTx or in BE2 where occasionally
	 * an interrupt may be raised even when EQ is unarmed.
	 * If NAPI is already scheduled, then counting & notifying
	 * events will orphan them.
	 */
	if (napi_schedule_prep(&eqo->napi)) {
		num_evts = events_get(eqo);
		__napi_schedule(&eqo->napi);
		if (num_evts)
			eqo->spurious_intr = 0;
	}
	/* ack the consumed events; leave the EQ unarmed for NAPI */
	be_eq_notify(adapter, eqo->q.id, false, true, num_evts);

	/* Return IRQ_HANDLED only for the first spurious intr
	 * after a valid intr to stop the kernel from branding
	 * this irq as a bad one!
	 */
	if (num_evts || eqo->spurious_intr++ == 0)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
2365
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002366static irqreturn_t be_msix(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002367{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002368 struct be_eq_obj *eqo = dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002369
Sathya Perla0b545a62012-11-23 00:27:18 +00002370 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2371 napi_schedule(&eqo->napi);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002372 return IRQ_HANDLED;
2373}
2374
Sathya Perla2e588f82011-03-11 02:49:26 +00002375static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002376{
Somnath Koture38b1702013-05-29 22:55:56 +00002377 return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002378}
2379
/* NAPI RX poll core: consume up to @budget completions from @rxo, hand good
 * packets to the stack (via GRO when eligible and not busy-polling), ack the
 * CQ, and replenish the RX ring when it runs low. Returns the number of
 * completions processed.
 */
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
			 int budget, int polling)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;
	u32 frags_consumed = 0;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Is it a flush compl that has no data */
		if (unlikely(rxcp->num_rcvd == 0))
			goto loop_continue;

		/* Discard compl with partial DMA Lancer B0 */
		if (unlikely(!rxcp->pkt_size)) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* On BE drop pkts that arrive due to imperfect filtering in
		 * promiscuous mode on some skews
		 */
		if (unlikely(rxcp->port != adapter->port_num &&
			     !lancer_chip(adapter))) {
			be_rx_compl_discard(rxo, rxcp);
			goto loop_continue;
		}

		/* Don't do gro when we're busy_polling */
		if (do_gro(rxcp) && polling != BUSY_POLLING)
			be_rx_compl_process_gro(rxo, napi, rxcp);
		else
			be_rx_compl_process(rxo, napi, rxcp);

loop_continue:
		frags_consumed += rxcp->num_rcvd;
		be_rx_stats_update(rxo, rxcp);
	}

	if (work_done) {
		be_cq_notify(adapter, rx_cq->id, true, work_done);

		/* When an rx-obj gets into post_starved state, just
		 * let be_worker do the posting.
		 */
		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
		    !rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_ATOMIC,
					 max_t(u32, MAX_RX_POST,
					       frags_consumed));
	}

	return work_done;
}
2439
Kalesh AP512bb8a2014-09-02 09:56:49 +05302440static inline void be_update_tx_err(struct be_tx_obj *txo, u32 status)
2441{
2442 switch (status) {
2443 case BE_TX_COMP_HDR_PARSE_ERR:
2444 tx_stats(txo)->tx_hdr_parse_err++;
2445 break;
2446 case BE_TX_COMP_NDMA_ERR:
2447 tx_stats(txo)->tx_dma_err++;
2448 break;
2449 case BE_TX_COMP_ACL_ERR:
2450 tx_stats(txo)->tx_spoof_check_err++;
2451 break;
2452 }
2453}
2454
2455static inline void lancer_update_tx_err(struct be_tx_obj *txo, u32 status)
2456{
2457 switch (status) {
2458 case LANCER_TX_COMP_LSO_ERR:
2459 tx_stats(txo)->tx_tso_err++;
2460 break;
2461 case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2462 case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2463 tx_stats(txo)->tx_spoof_check_err++;
2464 break;
2465 case LANCER_TX_COMP_QINQ_ERR:
2466 tx_stats(txo)->tx_qinq_err++;
2467 break;
2468 case LANCER_TX_COMP_PARITY_ERR:
2469 tx_stats(txo)->tx_internal_parity_err++;
2470 break;
2471 case LANCER_TX_COMP_DMA_ERR:
2472 tx_stats(txo)->tx_dma_err++;
2473 break;
2474 }
2475}
2476
/* Reap all pending TX completions on @txo (TX queue index @idx): reclaim
 * wrbs and skbs, account per-chip completion errors, ack the CQ, and wake
 * the netdev subqueue if it was stopped and the ring has drained below
 * half-full.
 */
static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
			  int idx)
{
	struct be_eth_tx_compl *txcp;
	int num_wrbs = 0, work_done = 0;
	u32 compl_status;
	u16 last_idx;

	while ((txcp = be_tx_compl_get(&txo->cq))) {
		last_idx = GET_TX_COMPL_BITS(wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, txo, last_idx);
		work_done++;

		/* non-zero status indicates a TX error; count it */
		compl_status = GET_TX_COMPL_BITS(status, txcp);
		if (compl_status) {
			if (lancer_chip(adapter))
				lancer_update_tx_err(txo, compl_status);
			else
				be_update_tx_err(txo, compl_status);
		}
	}

	if (work_done) {
		be_cq_notify(adapter, txo->cq.id, true, work_done);
		atomic_sub(num_wrbs, &txo->q.used);

		/* As Tx wrbs have been freed up, wake up netdev queue
		 * if it was stopped due to lack of tx wrbs. */
		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
		    atomic_read(&txo->q.used) < txo->q.len / 2) {
			netif_wake_subqueue(adapter->netdev, idx);
		}

		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
		tx_stats(txo)->tx_compl += work_done;
		u64_stats_update_end(&tx_stats(txo)->sync_compl);
	}
}
Sathya Perla3c8def92011-06-12 20:01:58 +00002515
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302516int be_poll(struct napi_struct *napi, int budget)
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002517{
2518 struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2519 struct be_adapter *adapter = eqo->adapter;
Sathya Perla0b545a62012-11-23 00:27:18 +00002520 int max_work = 0, work, i, num_evts;
Sathya Perla6384a4d2013-10-25 10:40:16 +05302521 struct be_rx_obj *rxo;
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302522 struct be_tx_obj *txo;
Sathya Perla3c8def92011-06-12 20:01:58 +00002523
Sathya Perla0b545a62012-11-23 00:27:18 +00002524 num_evts = events_get(eqo);
2525
Sathya Perlaa4906ea2014-09-02 09:56:56 +05302526 for_all_tx_queues_on_eq(adapter, eqo, txo, i)
2527 be_process_tx(adapter, txo, i);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002528
Sathya Perla6384a4d2013-10-25 10:40:16 +05302529 if (be_lock_napi(eqo)) {
2530 /* This loop will iterate twice for EQ0 in which
2531 * completions of the last RXQ (default one) are also processed
2532 * For other EQs the loop iterates only once
2533 */
2534 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2535 work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2536 max_work = max(work, max_work);
2537 }
2538 be_unlock_napi(eqo);
2539 } else {
2540 max_work = budget;
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002541 }
2542
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002543 if (is_mcc_eqo(eqo))
2544 be_process_mcc(adapter);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002545
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002546 if (max_work < budget) {
2547 napi_complete(napi);
Sathya Perla0b545a62012-11-23 00:27:18 +00002548 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002549 } else {
2550 /* As we'll continue in polling mode, count and clear events */
Sathya Perla0b545a62012-11-23 00:27:18 +00002551 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
Padmanabh Ratnakar93c86702011-12-19 01:53:35 +00002552 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002553 return max_work;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002554}
2555
#ifdef CONFIG_NET_RX_BUSY_POLL
/* Low-latency socket poll: do a short (4-frame) RX reap, stopping at the
 * first of this EQ's RX queues that yields work. Returns the number of
 * frames processed, or LL_FLUSH_BUSY if NAPI currently owns the queues.
 */
static int be_busy_poll(struct napi_struct *napi)
{
	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter = eqo->adapter;
	struct be_rx_obj *rxo;
	int rx_done = 0;
	int i;

	if (!be_lock_busy_poll(eqo))
		return LL_FLUSH_BUSY;

	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
		rx_done = be_process_rx(rxo, napi, 4, BUSY_POLLING);
		if (rx_done)
			break;
	}

	be_unlock_busy_poll(eqo);
	return rx_done;
}
#endif
2577
/* Poll the adapter for unrecoverable hardware/firmware errors and log them.
 * Lancer chips report errors via the SLIPORT BAR registers; other chips
 * expose Unrecoverable Error (UE) bits in PCI config space. On a detected
 * error the carrier is turned off; adapter->hw_error is set only where the
 * error is known to be fatal (Lancer, and UEs on Skyhawk).
 */
void be_detect_error(struct be_adapter *adapter)
{
	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
	u32 i;
	bool error_detected = false;
	struct device *dev = &adapter->pdev->dev;
	struct net_device *netdev = adapter->netdev;

	/* Nothing more to do once an error has already been latched */
	if (be_hw_error(adapter))
		return;

	if (lancer_chip(adapter)) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
			sliport_err1 = ioread32(adapter->db +
						SLIPORT_ERROR1_OFFSET);
			sliport_err2 = ioread32(adapter->db +
						SLIPORT_ERROR2_OFFSET);
			adapter->hw_error = true;
			/* Do not log error messages if its a FW reset */
			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
				dev_info(dev, "Firmware update in progress\n");
			} else {
				error_detected = true;
				dev_err(dev, "Error detected in the card\n");
				dev_err(dev, "ERR: sliport status 0x%x\n",
					sliport_status);
				dev_err(dev, "ERR: sliport error1 0x%x\n",
					sliport_err1);
				dev_err(dev, "ERR: sliport error2 0x%x\n",
					sliport_err2);
			}
		}
	} else {
		/* BE2/BE3/Skyhawk: read raw UE status and the mask registers;
		 * masked-off bits are not real errors.
		 */
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW, &ue_lo);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HIGH, &ue_hi);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
		pci_read_config_dword(adapter->pdev,
				      PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);

		ue_lo = (ue_lo & ~ue_lo_mask);
		ue_hi = (ue_hi & ~ue_hi_mask);

		/* On certain platforms BE hardware can indicate spurious UEs.
		 * Allow HW to stop working completely in case of a real UE.
		 * Hence not setting the hw_error for UE detection.
		 */

		if (ue_lo || ue_hi) {
			error_detected = true;
			dev_err(dev,
				"Unrecoverable Error detected in the adapter");
			dev_err(dev, "Please reboot server to recover");
			if (skyhawk_chip(adapter))
				adapter->hw_error = true;
			/* Log a line for every set (unmasked) UE bit */
			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
				if (ue_lo & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_low_desc[i]);
			}
			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
				if (ue_hi & 1)
					dev_err(dev, "UE: %s bit set\n",
						ue_status_hi_desc[i]);
			}
		}
	}
	if (error_detected)
		netif_carrier_off(netdev);
}
2653
Sathya Perla8d56ff12009-11-22 22:02:26 +00002654static void be_msix_disable(struct be_adapter *adapter)
2655{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002656 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002657 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002658 adapter->num_msix_vec = 0;
Sathya Perla68d7bdc2013-08-27 16:57:35 +05302659 adapter->num_msix_roce_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002660 }
2661}
2662
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002663static int be_msix_enable(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002664{
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002665 int i, num_vec;
Sathya Perlad3791422012-09-28 04:39:44 +00002666 struct device *dev = &adapter->pdev->dev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002667
Sathya Perla92bf14a2013-08-27 16:57:32 +05302668 /* If RoCE is supported, program the max number of NIC vectors that
2669 * may be configured via set-channels, along with vectors needed for
2670 * RoCe. Else, just program the number we'll use initially.
2671 */
2672 if (be_roce_supported(adapter))
2673 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2674 2 * num_online_cpus());
2675 else
2676 num_vec = adapter->cfg_num_qs;
Sathya Perla3abcded2010-10-03 22:12:27 -07002677
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002678 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002679 adapter->msix_entries[i].entry = i;
2680
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002681 num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2682 MIN_MSIX_VECTORS, num_vec);
2683 if (num_vec < 0)
2684 goto fail;
Sathya Perlad3791422012-09-28 04:39:44 +00002685
Sathya Perla92bf14a2013-08-27 16:57:32 +05302686 if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2687 adapter->num_msix_roce_vec = num_vec / 2;
2688 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2689 adapter->num_msix_roce_vec);
2690 }
2691
2692 adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2693
2694 dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2695 adapter->num_msix_vec);
Somnath Koturc2bba3d2013-05-02 03:37:08 +00002696 return 0;
Alexander Gordeev7dc4c062014-02-18 11:11:40 +01002697
2698fail:
2699 dev_warn(dev, "MSIx enable failed\n");
2700
2701 /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2702 if (!be_physfn(adapter))
2703 return num_vec;
2704 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002705}
2706
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002707static inline int be_msix_vec_get(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302708 struct be_eq_obj *eqo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002709{
Sathya Perlaf2f781a2013-08-27 16:57:30 +05302710 return adapter->msix_entries[eqo->msix_idx].vector;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002711}
2712
2713static int be_msix_register(struct be_adapter *adapter)
2714{
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002715 struct net_device *netdev = adapter->netdev;
2716 struct be_eq_obj *eqo;
2717 int status, i, vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002718
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002719 for_all_evt_queues(adapter, eqo, i) {
2720 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2721 vec = be_msix_vec_get(adapter, eqo);
2722 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002723 if (status)
2724 goto err_msix;
2725 }
Sathya Perlab628bde2009-08-17 00:58:26 +00002726
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002727 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002728err_msix:
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002729 for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2730 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2731 dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
Sathya Perla748b5392014-05-09 13:29:13 +05302732 status);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002733 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002734 return status;
2735}
2736
2737static int be_irq_register(struct be_adapter *adapter)
2738{
2739 struct net_device *netdev = adapter->netdev;
2740 int status;
2741
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002742 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002743 status = be_msix_register(adapter);
2744 if (status == 0)
2745 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002746 /* INTx is not supported for VF */
2747 if (!be_physfn(adapter))
2748 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002749 }
2750
Sathya Perlae49cc342012-11-27 19:50:02 +00002751 /* INTx: only the first EQ is used */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002752 netdev->irq = adapter->pdev->irq;
2753 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
Sathya Perlae49cc342012-11-27 19:50:02 +00002754 &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002755 if (status) {
2756 dev_err(&adapter->pdev->dev,
2757 "INTx request IRQ failed - err %d\n", status);
2758 return status;
2759 }
2760done:
2761 adapter->isr_registered = true;
2762 return 0;
2763}
2764
2765static void be_irq_unregister(struct be_adapter *adapter)
2766{
2767 struct net_device *netdev = adapter->netdev;
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002768 struct be_eq_obj *eqo;
Sathya Perla3abcded2010-10-03 22:12:27 -07002769 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002770
2771 if (!adapter->isr_registered)
2772 return;
2773
2774 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002775 if (!msix_enabled(adapter)) {
Sathya Perlae49cc342012-11-27 19:50:02 +00002776 free_irq(netdev->irq, &adapter->eq_obj[0]);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777 goto done;
2778 }
2779
2780 /* MSIx */
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002781 for_all_evt_queues(adapter, eqo, i)
2782 free_irq(be_msix_vec_get(adapter, eqo), eqo);
Sathya Perla3abcded2010-10-03 22:12:27 -07002783
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002784done:
2785 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002786}
2787
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002788static void be_rx_qs_destroy(struct be_adapter *adapter)
Sathya Perla482c9e72011-06-29 23:33:17 +00002789{
2790 struct be_queue_info *q;
2791 struct be_rx_obj *rxo;
2792 int i;
2793
2794 for_all_rx_queues(adapter, rxo, i) {
2795 q = &rxo->q;
2796 if (q->created) {
2797 be_cmd_rxq_destroy(adapter, q);
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002798 be_rx_cq_clean(rxo);
Sathya Perla482c9e72011-06-29 23:33:17 +00002799 }
Sathya Perla10ef9ab2012-02-09 18:05:27 +00002800 be_queue_free(adapter, q);
Sathya Perla482c9e72011-06-29 23:33:17 +00002801 }
2802}
2803
/* netdev ndo_stop: quiesce and tear down the data path.
 * Sequence: stop RoCE, disable NAPI/busy-poll, stop async MCC processing,
 * drain TX, destroy the RX queues, drop the extra unicast MACs, quiesce
 * and clean each EQ, and finally release the IRQs.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int i;

	/* This protection is needed as be_close() may be called even when the
	 * adapter is in cleared state (after eeh perm failure)
	 */
	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
		return 0;

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
			be_disable_busy_poll(eqo);
		}
		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
	}

	be_async_mcc_disable(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	netif_tx_disable(netdev);
	be_tx_compl_clean(adapter);

	be_rx_qs_destroy(adapter);

	/* Slot 0 holds the primary MAC; slots 1..uc_macs are the uc list */
	for (i = 1; i < (adapter->uc_macs + 1); i++)
		be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id[i], 0);
	adapter->uc_macs = 0;

	/* Make sure no in-flight interrupt handler still touches each EQ
	 * before cleaning it.
	 */
	for_all_evt_queues(adapter, eqo, i) {
		if (msix_enabled(adapter))
			synchronize_irq(be_msix_vec_get(adapter, eqo));
		else
			synchronize_irq(netdev->irq);
		be_eq_clean(eqo);
	}

	be_irq_unregister(adapter);

	return 0;
}
2853
/* Allocate and create all RX queues in FW and, when more than one RXQ
 * exists, build the RSS indirection table and program the RSS hash key.
 * Finally post an initial batch of RX buffers on every queue.
 * Returns 0 or a negative/FW error code.
 */
static int be_rx_qs_create(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int rc, i, j;
	u8 rss_hkey[RSS_HASH_KEY_LEN];
	struct rss_info *rss = &adapter->rss_info;

	for_all_rx_queues(adapter, rxo, i) {
		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
				    sizeof(struct be_eth_rx_d));
		if (rc)
			return rc;
	}

	/* The FW would like the default RXQ to be created first */
	rxo = default_rxo(adapter);
	rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
			       adapter->if_handle, false, &rxo->rss_id);
	if (rc)
		return rc;

	for_all_rss_queues(adapter, rxo, i) {
		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
				       rx_frag_size, adapter->if_handle,
				       true, &rxo->rss_id);
		if (rc)
			return rc;
	}

	if (be_multi_rxq(adapter)) {
		/* Fill the indirection table by striping the RSS queue ids
		 * round-robin across all RSS_INDIR_TABLE_LEN slots.
		 */
		for (j = 0; j < RSS_INDIR_TABLE_LEN;
		     j += adapter->num_rx_qs - 1) {
			for_all_rss_queues(adapter, rxo, i) {
				if ((j + i) >= RSS_INDIR_TABLE_LEN)
					break;
				rss->rsstable[j + i] = rxo->rss_id;
				rss->rss_queue[j + i] = i;
			}
		}
		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;

		/* UDP RSS is supported only on non-BEx chips */
		if (!BEx_chip(adapter))
			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
				RSS_ENABLE_UDP_IPV6;
	} else {
		/* Disable RSS, if only default RX Q is created */
		rss->rss_flags = RSS_ENABLE_NONE;
	}

	/* 128 appears to be the indirection table length programmed in FW
	 * (same value as RSS_INDIR_TABLE_LEN) -- TODO confirm against
	 * be_cmd_rss_config()
	 */
	get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
			       128, rss_hkey);
	if (rc) {
		rss->rss_flags = RSS_ENABLE_NONE;
		return rc;
	}

	/* Remember the key that was programmed, for ethtool reporting */
	memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);

	/* First time posting */
	for_all_rx_queues(adapter, rxo, i)
		be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	return 0;
}
2919
/* netdev ndo_open: bring up the data path.
 * Sequence: create RX queues, register IRQs, arm all RX/TX CQs, enable
 * async MCC, enable NAPI/busy-poll and arm each EQ, report link state,
 * start the TX queues and open the RoCE device. On any failure the
 * partially-initialized state is torn down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	u8 link_status;
	int status, i;

	status = be_rx_qs_create(adapter);
	if (status)
		goto err;

	status = be_irq_register(adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i)
		be_cq_notify(adapter, rxo->cq.id, true, 0);

	for_all_tx_queues(adapter, txo, i)
		be_cq_notify(adapter, txo->cq.id, true, 0);

	be_async_mcc_enable(adapter);

	for_all_evt_queues(adapter, eqo, i) {
		napi_enable(&eqo->napi);
		be_enable_busy_poll(eqo);
		be_eq_notify(adapter, eqo->q.id, true, true, 0);
	}
	adapter->flags |= BE_FLAGS_NAPI_ENABLED;

	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
	if (!status)
		be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	/* VxLAN offloads are supported only on Skyhawk; ask the stack to
	 * replay the known VxLAN ports to our ndo_add_vxlan_port handler.
	 */
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
#endif

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2969
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002970static int be_setup_wol(struct be_adapter *adapter, bool enable)
2971{
2972 struct be_dma_mem cmd;
2973 int status = 0;
2974 u8 mac[ETH_ALEN];
2975
2976 memset(mac, 0, ETH_ALEN);
2977
2978 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
Joe Perchesede23fa2013-08-26 22:45:23 -07002979 cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2980 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05302981 if (!cmd.va)
Kalesh AP6b568682014-07-17 16:20:22 +05302982 return -ENOMEM;
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002983
2984 if (enable) {
2985 status = pci_write_config_dword(adapter->pdev,
Sathya Perla748b5392014-05-09 13:29:13 +05302986 PCICFG_PM_CONTROL_OFFSET,
2987 PCICFG_PM_CONTROL_MASK);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002988 if (status) {
2989 dev_err(&adapter->pdev->dev,
Frans Pop2381a552010-03-24 07:57:36 +00002990 "Could not enable Wake-on-lan\n");
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002991 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2992 cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002993 return status;
2994 }
2995 status = be_cmd_enable_magic_wol(adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05302996 adapter->netdev->dev_addr,
2997 &cmd);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002998 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2999 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
3000 } else {
3001 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
3002 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
3003 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
3004 }
3005
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003006 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003007 return status;
3008}
3009
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003010/*
3011 * Generate a seed MAC address from the PF MAC Address using jhash.
3012 * MAC Address for VFs are assigned incrementally starting from the seed.
3013 * These addresses are programmed in the ASIC by the PF and the VF driver
3014 * queries for the MAC address during its probe.
3015 */
Sathya Perla4c876612013-02-03 20:30:11 +00003016static int be_vf_eth_addr_config(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003017{
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003018 u32 vf;
Sathya Perla3abcded2010-10-03 22:12:27 -07003019 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003020 u8 mac[ETH_ALEN];
Sathya Perla11ac75e2011-12-13 00:58:50 +00003021 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003022
3023 be_vf_eth_addr_generate(adapter, mac);
3024
Sathya Perla11ac75e2011-12-13 00:58:50 +00003025 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303026 if (BEx_chip(adapter))
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003027 status = be_cmd_pmac_add(adapter, mac,
Sathya Perla11ac75e2011-12-13 00:58:50 +00003028 vf_cfg->if_handle,
3029 &vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303030 else
3031 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3032 vf + 1);
Padmanabh Ratnakar590c3912011-11-25 05:47:26 +00003033
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003034 if (status)
3035 dev_err(&adapter->pdev->dev,
Sathya Perla748b5392014-05-09 13:29:13 +05303036 "Mac address assignment failed for VF %d\n",
3037 vf);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003038 else
Sathya Perla11ac75e2011-12-13 00:58:50 +00003039 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003040
3041 mac[5] += 1;
3042 }
3043 return status;
3044}
3045
Sathya Perla4c876612013-02-03 20:30:11 +00003046static int be_vfs_mac_query(struct be_adapter *adapter)
3047{
3048 int status, vf;
3049 u8 mac[ETH_ALEN];
3050 struct be_vf_cfg *vf_cfg;
Sathya Perla4c876612013-02-03 20:30:11 +00003051
3052 for_all_vfs(adapter, vf_cfg, vf) {
Suresh Reddyb188f092014-01-15 13:23:39 +05303053 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3054 mac, vf_cfg->if_handle,
3055 false, vf+1);
Sathya Perla4c876612013-02-03 20:30:11 +00003056 if (status)
3057 return status;
3058 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3059 }
3060 return 0;
3061}
3062
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003063static void be_vf_clear(struct be_adapter *adapter)
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003064{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003065 struct be_vf_cfg *vf_cfg;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003066 u32 vf;
3067
Sathya Perla257a3fe2013-06-14 15:54:51 +05303068 if (pci_vfs_assigned(adapter->pdev)) {
Sathya Perla4c876612013-02-03 20:30:11 +00003069 dev_warn(&adapter->pdev->dev,
3070 "VFs are assigned to VMs: not disabling VFs\n");
Sathya Perla39f1d942012-05-08 19:41:24 +00003071 goto done;
3072 }
3073
Sathya Perlab4c1df92013-05-08 02:05:47 +00003074 pci_disable_sriov(adapter->pdev);
3075
Sathya Perla11ac75e2011-12-13 00:58:50 +00003076 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla3175d8c2013-07-23 15:25:03 +05303077 if (BEx_chip(adapter))
Sathya Perla11ac75e2011-12-13 00:58:50 +00003078 be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3079 vf_cfg->pmac_id, vf + 1);
Sathya Perla3175d8c2013-07-23 15:25:03 +05303080 else
3081 be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3082 vf + 1);
Sathya Perlaf9449ab2011-10-24 02:45:01 +00003083
Sathya Perla11ac75e2011-12-13 00:58:50 +00003084 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3085 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003086done:
3087 kfree(adapter->vf_cfg);
3088 adapter->num_vfs = 0;
Vasundhara Volamf174c7e2014-07-17 16:20:31 +05303089 adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00003090}
3091
/* Tear down all adapter queues: MCC first, then RX CQs, TX queues and
 * the event queues. NOTE(review): presumably this is the reverse of the
 * creation order and must stay that way (EQs last) -- confirm against
 * the corresponding be_*_queues_create() paths.
 */
static void be_clear_queues(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_cqs_destroy(adapter);
	be_tx_queues_destroy(adapter);
	be_evt_queues_destroy(adapter);
}
3099
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303100static void be_cancel_worker(struct be_adapter *adapter)
Sathya Perlaa54769f2011-10-24 02:45:00 +00003101{
Sathya Perla191eb752012-02-23 18:50:13 +00003102 if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3103 cancel_delayed_work_sync(&adapter->work);
3104 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3105 }
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303106}
3107
Somnath Koturb05004a2013-12-05 12:08:16 +05303108static void be_mac_clear(struct be_adapter *adapter)
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303109{
3110 int i;
3111
Somnath Koturb05004a2013-12-05 12:08:16 +05303112 if (adapter->pmac_id) {
3113 for (i = 0; i < (adapter->uc_macs + 1); i++)
3114 be_cmd_pmac_del(adapter, adapter->if_handle,
3115 adapter->pmac_id[i], 0);
3116 adapter->uc_macs = 0;
3117
3118 kfree(adapter->pmac_id);
3119 adapter->pmac_id = NULL;
3120 }
3121}
3122
#ifdef CONFIG_BE2NET_VXLAN
/* Revert VxLAN offload state: convert the tunnel interface back to a
 * normal one if offloads were on, clear the VxLAN port in FW, and reset
 * the driver's bookkeeping.
 */
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
	bool offloads_on = adapter->flags & BE_FLAGS_VXLAN_OFFLOADS;

	if (offloads_on)
		be_cmd_manage_iface(adapter, adapter->if_handle,
				    OP_CONVERT_TUNNEL_TO_NORMAL);

	if (adapter->vxlan_port)
		be_cmd_set_vxlan_port(adapter, 0);

	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
	adapter->vxlan_port = 0;
}
#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05303137
/* Undo be_setup(): stop the worker, clear SR-IOV, rebalance FW resources
 * for future VFs, disable VxLAN offloads, drop all MACs, destroy the FW
 * interface and all queues, and release MSI-X. Clears SETUP_DONE last.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_cancel_worker(adapter);

	if (sriov_enabled(adapter))
		be_vf_clear(adapter);

	/* Re-configure FW to distribute resources evenly across max-supported
	 * number of VFs, only when VFs are not already enabled.
	 */
	if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
		be_cmd_set_sriov_config(adapter, adapter->pool_res,
					pci_sriov_get_totalvfs(adapter->pdev));

#ifdef CONFIG_BE2NET_VXLAN
	be_disable_vxlan_offloads(adapter);
#endif
	/* delete the primary mac along with the uc-mac list */
	be_mac_clear(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	be_clear_queues(adapter);

	be_msix_disable(adapter);
	/* Allows a subsequent be_close() to return early (see its guard) */
	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
	return 0;
}
3166
Sathya Perla4c876612013-02-03 20:30:11 +00003167static int be_vfs_if_create(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003168{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303169 struct be_resources res = {0};
Sathya Perla4c876612013-02-03 20:30:11 +00003170 struct be_vf_cfg *vf_cfg;
3171 u32 cap_flags, en_flags, vf;
Antonio Alecrim Jr922bbe82013-09-13 14:05:49 -03003172 int status = 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003173
Sathya Perla4c876612013-02-03 20:30:11 +00003174 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3175 BE_IF_FLAGS_MULTICAST;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003176
Sathya Perla4c876612013-02-03 20:30:11 +00003177 for_all_vfs(adapter, vf_cfg, vf) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303178 if (!BE3_chip(adapter)) {
3179 status = be_cmd_get_profile_config(adapter, &res,
3180 vf + 1);
3181 if (!status)
3182 cap_flags = res.if_cap_flags;
3183 }
Sathya Perla4c876612013-02-03 20:30:11 +00003184
3185 /* If a FW profile exists, then cap_flags are updated */
3186 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
Sathya Perla748b5392014-05-09 13:29:13 +05303187 BE_IF_FLAGS_BROADCAST |
3188 BE_IF_FLAGS_MULTICAST);
3189 status =
3190 be_cmd_if_create(adapter, cap_flags, en_flags,
3191 &vf_cfg->if_handle, vf + 1);
Sathya Perla4c876612013-02-03 20:30:11 +00003192 if (status)
3193 goto err;
3194 }
3195err:
3196 return status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003197}
3198
Sathya Perla39f1d942012-05-08 19:41:24 +00003199static int be_vf_setup_init(struct be_adapter *adapter)
Sathya Perla30128032011-11-10 19:17:57 +00003200{
Sathya Perla11ac75e2011-12-13 00:58:50 +00003201 struct be_vf_cfg *vf_cfg;
Sathya Perla30128032011-11-10 19:17:57 +00003202 int vf;
3203
Sathya Perla39f1d942012-05-08 19:41:24 +00003204 adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3205 GFP_KERNEL);
3206 if (!adapter->vf_cfg)
3207 return -ENOMEM;
3208
Sathya Perla11ac75e2011-12-13 00:58:50 +00003209 for_all_vfs(adapter, vf_cfg, vf) {
3210 vf_cfg->if_handle = -1;
3211 vf_cfg->pmac_id = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003212 }
Sathya Perla39f1d942012-05-08 19:41:24 +00003213 return 0;
Sathya Perla30128032011-11-10 19:17:57 +00003214}
3215
/* Bring up SR-IOV VFs.
 * If VFs are already enabled in HW (pci_num_vf() != 0, e.g. after a PF
 * driver reload), the existing i/f handles and MACs are queried from FW
 * instead of being re-created.  On any failure all VF state is torn down
 * via be_vf_clear().
 * Returns 0 on success or a FW-cmd/PCI error code.
 */
static int be_vf_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	struct be_vf_cfg *vf_cfg;
	int status, old_vfs, vf;
	u32 privileges;

	old_vfs = pci_num_vf(adapter->pdev);

	status = be_vf_setup_init(adapter);
	if (status)
		goto err;

	if (old_vfs) {
		/* VFs already exist in FW: just read back their state */
		for_all_vfs(adapter, vf_cfg, vf) {
			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
			if (status)
				goto err;
		}

		status = be_vfs_mac_query(adapter);
		if (status)
			goto err;
	} else {
		/* Fresh enable: create i/fs and program initial MACs */
		status = be_vfs_if_create(adapter);
		if (status)
			goto err;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto err;
	}

	for_all_vfs(adapter, vf_cfg, vf) {
		/* Allow VFs to programs MAC/VLAN filters */
		status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
		if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
			status = be_cmd_set_fn_privileges(adapter,
							  privileges |
							  BE_PRIV_FILTMGMT,
							  vf + 1);
			if (!status)
				dev_info(dev, "VF%d has FILTMGMT privilege\n",
					 vf);
		}

		/* Allow full available bandwidth */
		if (!old_vfs)
			be_cmd_config_qos(adapter, 0, 0, vf + 1);

		if (!old_vfs) {
			be_cmd_enable_vf(adapter, vf + 1);
			be_cmd_set_logical_link_config(adapter,
						       IFLA_VF_LINK_STATE_AUTO,
						       vf+1);
		}
	}

	if (!old_vfs) {
		/* Flip SR-IOV on in PCI config space only after FW-side
		 * setup succeeded; roll back num_vfs if the PCI core fails.
		 */
		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
		if (status) {
			dev_err(dev, "SRIOV enable failed\n");
			adapter->num_vfs = 0;
			goto err;
		}
	}

	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
	return 0;
err:
	dev_err(dev, "VF setup failed\n");
	be_vf_clear(adapter);
	return status;
}
3290
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303291/* Converting function_mode bits on BE3 to SH mc_type enums */
3292
3293static u8 be_convert_mc_type(u32 function_mode)
3294{
Suresh Reddy66064db2014-06-23 16:41:29 +05303295 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303296 return vNIC1;
Suresh Reddy66064db2014-06-23 16:41:29 +05303297 else if (function_mode & QNQ_MODE)
Vasundhara Volamf93f1602014-02-12 16:09:25 +05303298 return FLEX10;
3299 else if (function_mode & VNIC_MODE)
3300 return vNIC2;
3301 else if (function_mode & UMC_ENABLED)
3302 return UMC;
3303 else
3304 return MC_NONE;
3305}
3306
/* On BE2/BE3 FW does not suggest the supported limits:
 * derive the per-function resource limits (res) in the driver based on
 * chip type, function type (PF/VF), multi-channel mode and SR-IOV state.
 */
static void BEx_get_resources(struct be_adapter *adapter,
			      struct be_resources *res)
{
	bool use_sriov = adapter->num_vfs ? 1 : 0;

	/* PFs get the larger unicast-MAC (pmac) budget */
	if (be_physfn(adapter))
		res->max_uc_mac = BE_UC_PMAC_COUNT;
	else
		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;

	adapter->mc_type = be_convert_mc_type(adapter->function_mode);

	if (be_is_mc(adapter)) {
		/* Assuming that there are 4 channels per port,
		 * when multi-channel is enabled
		 */
		if (be_is_qnq_mode(adapter))
			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
		else
			/* In a non-qnq multichannel mode, the pvid
			 * takes up one vlan entry
			 */
			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
	} else {
		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
	}

	res->max_mcast_mac = BE_MAX_MC;

	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
	 * *only* if it is RSS-capable.
	 */
	if (BE2_chip(adapter) || use_sriov || (adapter->port_num > 1) ||
	    !be_physfn(adapter) || (be_is_mc(adapter) &&
	    !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
		res->max_tx_qs = 1;
	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
		struct be_resources super_nic_res = {0};

		/* On a SuperNIC profile, the driver needs to use the
		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
		 */
		be_cmd_get_profile_config(adapter, &super_nic_res, 0);
		/* Some old versions of BE3 FW don't report max_tx_qs value */
		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
	} else {
		res->max_tx_qs = BE3_MAX_TX_QS;
	}

	/* RSS queues only for an RSS-capable PF without SR-IOV enabled;
	 * otherwise max_rss_qs stays 0 and only the default RXQ exists.
	 */
	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
	    !use_sriov && be_physfn(adapter))
		res->max_rss_qs = (adapter->be3_native) ?
				   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
	res->max_rx_qs = res->max_rss_qs + 1;	/* +1 for the default RXQ */

	/* Fewer EQs per PF when the part is SR-IOV capable */
	if (be_physfn(adapter))
		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
	else
		res->max_evt_qs = 1;

	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
}
3374
Sathya Perla30128032011-11-10 19:17:57 +00003375static void be_setup_init(struct be_adapter *adapter)
3376{
3377 adapter->vlan_prio_bmap = 0xff;
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003378 adapter->phy.link_speed = -1;
Sathya Perla30128032011-11-10 19:17:57 +00003379 adapter->if_handle = -1;
3380 adapter->be3_native = false;
3381 adapter->promiscuous = false;
Padmanabh Ratnakarf25b1192012-10-20 06:02:52 +00003382 if (be_physfn(adapter))
3383 adapter->cmd_privileges = MAX_PRIVILEGES;
3384 else
3385 adapter->cmd_privileges = MIN_PRIVILEGES;
Sathya Perla30128032011-11-10 19:17:57 +00003386}
3387
Vasundhara Volambec84e62014-06-30 13:01:32 +05303388static int be_get_sriov_config(struct be_adapter *adapter)
3389{
3390 struct device *dev = &adapter->pdev->dev;
3391 struct be_resources res = {0};
Sathya Perlad3d18312014-08-01 17:47:30 +05303392 int max_vfs, old_vfs;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303393
3394 /* Some old versions of BE3 FW don't report max_vfs value */
Sathya Perlad3d18312014-08-01 17:47:30 +05303395 be_cmd_get_profile_config(adapter, &res, 0);
3396
Vasundhara Volambec84e62014-06-30 13:01:32 +05303397 if (BE3_chip(adapter) && !res.max_vfs) {
3398 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3399 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3400 }
3401
Sathya Perlad3d18312014-08-01 17:47:30 +05303402 adapter->pool_res = res;
Vasundhara Volambec84e62014-06-30 13:01:32 +05303403
3404 if (!be_max_vfs(adapter)) {
3405 if (num_vfs)
Vasundhara Volam50762662014-09-12 17:39:14 +05303406 dev_warn(dev, "SRIOV is disabled. Ignoring num_vfs\n");
Vasundhara Volambec84e62014-06-30 13:01:32 +05303407 adapter->num_vfs = 0;
3408 return 0;
3409 }
3410
Sathya Perlad3d18312014-08-01 17:47:30 +05303411 pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3412
Vasundhara Volambec84e62014-06-30 13:01:32 +05303413 /* validate num_vfs module param */
3414 old_vfs = pci_num_vf(adapter->pdev);
3415 if (old_vfs) {
3416 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3417 if (old_vfs != num_vfs)
3418 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3419 adapter->num_vfs = old_vfs;
3420 } else {
3421 if (num_vfs > be_max_vfs(adapter)) {
3422 dev_info(dev, "Resources unavailable to init %d VFs\n",
3423 num_vfs);
3424 dev_info(dev, "Limiting to %d VFs\n",
3425 be_max_vfs(adapter));
3426 }
3427 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3428 }
3429
3430 return 0;
3431}
3432
Sathya Perla92bf14a2013-08-27 16:57:32 +05303433static int be_get_resources(struct be_adapter *adapter)
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003434{
Sathya Perla92bf14a2013-08-27 16:57:32 +05303435 struct device *dev = &adapter->pdev->dev;
3436 struct be_resources res = {0};
3437 int status;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003438
Sathya Perla92bf14a2013-08-27 16:57:32 +05303439 if (BEx_chip(adapter)) {
3440 BEx_get_resources(adapter, &res);
3441 adapter->res = res;
3442 }
3443
Sathya Perla92bf14a2013-08-27 16:57:32 +05303444 /* For Lancer, SH etc read per-function resource limits from FW.
3445 * GET_FUNC_CONFIG returns per function guaranteed limits.
3446 * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3447 */
Sathya Perla4c876612013-02-03 20:30:11 +00003448 if (!BEx_chip(adapter)) {
Sathya Perla92bf14a2013-08-27 16:57:32 +05303449 status = be_cmd_get_func_config(adapter, &res);
3450 if (status)
3451 return status;
3452
3453 /* If RoCE may be enabled stash away half the EQs for RoCE */
3454 if (be_roce_supported(adapter))
3455 res.max_evt_qs /= 2;
3456 adapter->res = res;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003457 }
3458
Sathya Perlaacbafeb2014-09-02 09:56:46 +05303459 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3460 be_max_txqs(adapter), be_max_rxqs(adapter),
3461 be_max_rss(adapter), be_max_eqs(adapter),
3462 be_max_vfs(adapter));
3463 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3464 be_max_uc(adapter), be_max_mc(adapter),
3465 be_max_vlans(adapter));
3466
Sathya Perla92bf14a2013-08-27 16:57:32 +05303467 return 0;
Padmanabh Ratnakarabb93952012-10-20 06:01:41 +00003468}
3469
Sathya Perlad3d18312014-08-01 17:47:30 +05303470static void be_sriov_config(struct be_adapter *adapter)
3471{
3472 struct device *dev = &adapter->pdev->dev;
3473 int status;
3474
3475 status = be_get_sriov_config(adapter);
3476 if (status) {
3477 dev_err(dev, "Failed to query SR-IOV configuration\n");
3478 dev_err(dev, "SR-IOV cannot be enabled\n");
3479 return;
3480 }
3481
3482 /* When the HW is in SRIOV capable configuration, the PF-pool
3483 * resources are equally distributed across the max-number of
3484 * VFs. The user may request only a subset of the max-vfs to be
3485 * enabled. Based on num_vfs, redistribute the resources across
3486 * num_vfs so that each VF will have access to more number of
3487 * resources. This facility is not available in BE3 FW.
3488 * Also, this is done by FW in Lancer chip.
3489 */
3490 if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3491 status = be_cmd_set_sriov_config(adapter,
3492 adapter->pool_res,
3493 adapter->num_vfs);
3494 if (status)
3495 dev_err(dev, "Failed to optimize SR-IOV resources\n");
3496 }
3497}
3498
/* Query the adapter's FW configuration and size the driver's resource
 * limits (adapter->res, pmac_id table, cfg_num_qs) accordingly.
 * Returns 0 on success or a negative error code.
 */
static int be_get_config(struct be_adapter *adapter)
{
	u16 profile_id;
	int status;

	status = be_cmd_query_fw_cfg(adapter);
	if (status)
		return status;

	/* Profile query is informational only; its failure is ignored */
	if (be_physfn(adapter)) {
		status = be_cmd_get_active_profile(adapter, &profile_id);
		if (!status)
			dev_info(&adapter->pdev->dev,
				 "Using profile 0x%x\n", profile_id);
	}

	/* SR-IOV pool (re)distribution must run before be_get_resources()
	 * so the per-function limits reflect the final VF count.
	 * Skipped on BE2 and on VFs.
	 */
	if (!BE2_chip(adapter) && be_physfn(adapter))
		be_sriov_config(adapter);

	status = be_get_resources(adapter);
	if (status)
		return status;

	/* One pmac_id slot per supported unicast MAC */
	adapter->pmac_id = kcalloc(be_max_uc(adapter),
				   sizeof(*adapter->pmac_id), GFP_KERNEL);
	if (!adapter->pmac_id)
		return -ENOMEM;

	/* Sanitize cfg_num_qs based on HW and platform limits */
	adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));

	return 0;
}
3532
Sathya Perla95046b92013-07-23 15:25:02 +05303533static int be_mac_setup(struct be_adapter *adapter)
3534{
3535 u8 mac[ETH_ALEN];
3536 int status;
3537
3538 if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3539 status = be_cmd_get_perm_mac(adapter, mac);
3540 if (status)
3541 return status;
3542
3543 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3544 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3545 } else {
3546 /* Maybe the HW was reset; dev_addr must be re-programmed */
3547 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3548 }
3549
Ajit Khaparde2c7a9dc2013-11-22 12:51:28 -06003550 /* For BE3-R VFs, the PF programs the initial MAC address */
3551 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3552 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3553 &adapter->pmac_id[0], 0);
Sathya Perla95046b92013-07-23 15:25:02 +05303554 return 0;
3555}
3556
/* Start the periodic (1s) worker task and record that it is scheduled,
 * so be_cancel_worker() knows there is work to cancel.
 */
static void be_schedule_worker(struct be_adapter *adapter)
{
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
}
3562
Sathya Perla77071332013-08-27 16:57:34 +05303563static int be_setup_queues(struct be_adapter *adapter)
3564{
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303565 struct net_device *netdev = adapter->netdev;
Sathya Perla77071332013-08-27 16:57:34 +05303566 int status;
3567
3568 status = be_evt_queues_create(adapter);
3569 if (status)
3570 goto err;
3571
3572 status = be_tx_qs_create(adapter);
3573 if (status)
3574 goto err;
3575
3576 status = be_rx_cqs_create(adapter);
3577 if (status)
3578 goto err;
3579
3580 status = be_mcc_queues_create(adapter);
3581 if (status)
3582 goto err;
3583
Sathya Perla68d7bdc2013-08-27 16:57:35 +05303584 status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3585 if (status)
3586 goto err;
3587
3588 status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3589 if (status)
3590 goto err;
3591
Sathya Perla77071332013-08-27 16:57:34 +05303592 return 0;
3593err:
3594 dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3595 return status;
3596}
3597
/* Tear down and re-create all queues (e.g. after a queue-count change)
 * while keeping the interface itself.  Takes the device down and back up
 * if it was running, and stops/restarts the periodic worker around the
 * queue rebuild.
 * Returns 0 on success or a negative error code.
 */
int be_update_queues(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (netif_running(netdev))
		be_close(netdev);

	be_cancel_worker(adapter);

	/* If any vectors have been shared with RoCE we cannot re-program
	 * the MSIx table.
	 */
	if (!adapter->num_msix_roce_vec)
		be_msix_disable(adapter);

	be_clear_queues(adapter);

	/* Re-enable MSI-x only if it was actually disabled above */
	if (!msix_enabled(adapter)) {
		status = be_msix_enable(adapter);
		if (status)
			return status;
	}

	status = be_setup_queues(adapter);
	if (status)
		return status;

	be_schedule_worker(adapter);

	if (netif_running(netdev))
		status = be_open(netdev);

	return status;
}
3633
/* Full adapter bring-up: query FW config, enable MSI-x, create the i/f
 * and all queues, program the MAC, apply VLAN/flow-control settings and
 * (optionally) set up SR-IOV VFs.  On any failure everything done so far
 * is undone via be_clear().
 * Returns 0 on success or a negative error code.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	u32 tx_fc, rx_fc, en_flags;
	int status;

	be_setup_init(adapter);

	if (!lancer_chip(adapter))
		be_cmd_req_native_mode(adapter);

	status = be_get_config(adapter);
	if (status)
		goto err;

	status = be_msix_enable(adapter);
	if (status)
		goto err;

	/* Enable the basic RX filter modes (+RSS if capable), clipped to
	 * what the i/f capabilities actually allow
	 */
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
		   BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
		en_flags |= BE_IF_FLAGS_RSS;
	en_flags = en_flags & be_if_cap_flags(adapter);
	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
				  &adapter->if_handle, 0);
	if (status)
		goto err;

	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
	rtnl_lock();
	status = be_setup_queues(adapter);
	rtnl_unlock();
	if (status)
		goto err;

	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);

	status = be_mac_setup(adapter);
	if (status)
		goto err;

	be_cmd_get_fw_ver(adapter);
	dev_info(dev, "FW version is %s\n", adapter->fw_ver);

	/* Warn (but continue) on known-problematic old BE2 firmware */
	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
			adapter->fw_ver);
		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
	}

	/* Restore VLAN filters that were configured before this (re)setup */
	if (adapter->vlans_added)
		be_vid_config(adapter);

	be_set_rx_mode(adapter->netdev);

	be_cmd_get_acpi_wol_cap(adapter);

	/* Re-apply flow control only if it differs from the FW's state */
	be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);

	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
		be_cmd_set_flow_control(adapter, adapter->tx_fc,
					adapter->rx_fc);

	if (be_physfn(adapter))
		be_cmd_set_logical_link_config(adapter,
					       IFLA_VF_LINK_STATE_AUTO, 0);

	/* VF setup failure is not fatal to PF bring-up */
	if (adapter->num_vfs)
		be_vf_setup(adapter);

	status = be_cmd_get_phy_info(adapter);
	if (!status && be_pause_supported(adapter))
		adapter->phy.fc_autoneg = 1;

	be_schedule_worker(adapter);
	adapter->flags |= BE_FLAGS_SETUP_DONE;
	return 0;
err:
	be_clear(adapter);
	return status;
}
3716
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: notify each event queue and schedule its NAPI handler
 * so RX/TX completions are processed without interrupts.
 */
static void be_netpoll(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *eqo;
	int qidx;

	for_all_evt_queues(adapter, eqo, qidx) {
		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
		napi_schedule(&eqo->napi);
	}
}
#endif
3730
/* Magic cookie marking a flash directory in the FW image, stored as two
 * 16-byte chunks ("*** SE FLAS" is NUL-padded to 16 bytes) and matched
 * against 32 bytes with memcmp() in get_fsec_info().
 */
static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08003732
Sathya Perla306f1342011-08-02 19:57:45 +00003733static bool phy_flashing_required(struct be_adapter *adapter)
3734{
Ajit Khaparde42f11cf2012-04-21 18:53:22 +00003735 return (adapter->phy.phy_type == TN_8022 &&
3736 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
Sathya Perla306f1342011-08-02 19:57:45 +00003737}
3738
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003739static bool is_comp_in_ufi(struct be_adapter *adapter,
3740 struct flash_section_info *fsec, int type)
3741{
3742 int i = 0, img_type = 0;
3743 struct flash_section_info_g2 *fsec_g2 = NULL;
3744
Sathya Perlaca34fe32012-11-06 17:48:56 +00003745 if (BE2_chip(adapter))
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003746 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3747
3748 for (i = 0; i < MAX_FLASH_COMP; i++) {
3749 if (fsec_g2)
3750 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3751 else
3752 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3753
3754 if (img_type == type)
3755 return true;
3756 }
3757 return false;
3758
3759}
3760
Jingoo Han4188e7d2013-08-05 18:02:02 +09003761static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303762 int header_size,
3763 const struct firmware *fw)
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003764{
3765 struct flash_section_info *fsec = NULL;
3766 const u8 *p = fw->data;
3767
3768 p += header_size;
3769 while (p < (fw->data + fw->size)) {
3770 fsec = (struct flash_section_info *)p;
3771 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3772 return fsec;
3773 p += 32;
3774 }
3775 return NULL;
3776}
3777
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303778static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3779 u32 img_offset, u32 img_size, int hdr_size,
3780 u16 img_optype, bool *crc_match)
3781{
3782 u32 crc_offset;
3783 int status;
3784 u8 crc[4];
3785
3786 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3787 if (status)
3788 return status;
3789
3790 crc_offset = hdr_size + img_offset + img_size - 4;
3791
3792 /* Skip flashing, if crc of flashed region matches */
3793 if (!memcmp(crc, p + crc_offset, 4))
3794 *crc_match = true;
3795 else
3796 *crc_match = false;
3797
3798 return status;
3799}
3800
/* Write one image section to flash in 32KB chunks through the DMA'ed
 * flashrom command buffer.  Intermediate chunks use the *_SAVE op; the
 * final chunk uses the *_FLASH op to commit.  A PHY-FW flash that FW
 * rejects with ILLEGAL_REQUEST is tolerated (treated as success).
 * Returns 0 on success or the failing FW-cmd status.
 */
static int be_flash(struct be_adapter *adapter, const u8 *img,
		    struct be_dma_mem *flash_cmd, int optype, int img_size)
{
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	u32 total_bytes, flash_op, num_bytes;
	int status;

	total_bytes = img_size;
	while (total_bytes) {
		/* Chunk size is capped at 32KB per flashrom cmd */
		num_bytes = min_t(u32, 32*1024, total_bytes);

		total_bytes -= num_bytes;

		/* Last chunk commits (FLASH); earlier chunks buffer (SAVE) */
		if (!total_bytes) {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_FLASH;
			else
				flash_op = FLASHROM_OPER_FLASH;
		} else {
			if (optype == OPTYPE_PHY_FW)
				flash_op = FLASHROM_OPER_PHY_SAVE;
			else
				flash_op = FLASHROM_OPER_SAVE;
		}

		memcpy(req->data_buf, img, num_bytes);
		img += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
					       flash_op, num_bytes);
		/* FW may legitimately refuse PHY flashing; don't fail then */
		if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
		    optype == OPTYPE_PHY_FW)
			break;
		else if (status)
			return status;
	}
	return 0;
}
3838
Vasundhara Volam0ad31572013-04-21 23:28:16 +00003839/* For BE2, BE3 and BE3-R */
Sathya Perlaca34fe32012-11-06 17:48:56 +00003840static int be_flash_BEx(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05303841 const struct firmware *fw,
3842 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde84517482009-09-04 03:12:16 +00003843{
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003844 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303845 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003846 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303847 int status, i, filehdr_size, num_comp;
3848 const struct flash_comp *pflashcomp;
3849 bool crc_match;
3850 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00003851
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003852 struct flash_comp gen3_flash_types[] = {
3853 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3854 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3855 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3856 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3857 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3858 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3859 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3860 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3861 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3862 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3863 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3864 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3865 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3866 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3867 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3868 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3869 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3870 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3871 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3872 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003873 };
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003874
3875 struct flash_comp gen2_flash_types[] = {
3876 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3877 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3878 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3879 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3880 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3881 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3882 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3883 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3884 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3885 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3886 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3887 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3888 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3889 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3890 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3891 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003892 };
3893
Sathya Perlaca34fe32012-11-06 17:48:56 +00003894 if (BE3_chip(adapter)) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003895 pflashcomp = gen3_flash_types;
3896 filehdr_size = sizeof(struct flash_file_hdr_g3);
Joe Perches215faf92010-12-21 02:16:10 -08003897 num_comp = ARRAY_SIZE(gen3_flash_types);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00003898 } else {
3899 pflashcomp = gen2_flash_types;
3900 filehdr_size = sizeof(struct flash_file_hdr_g2);
Joe Perches215faf92010-12-21 02:16:10 -08003901 num_comp = ARRAY_SIZE(gen2_flash_types);
Ajit Khaparde84517482009-09-04 03:12:16 +00003902 }
Sathya Perlaca34fe32012-11-06 17:48:56 +00003903
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003904 /* Get flash section info*/
3905 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3906 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303907 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003908 return -1;
3909 }
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003910 for (i = 0; i < num_comp; i++) {
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003911 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
Sarveshwar Bandi9fe96932010-03-02 22:37:28 +00003912 continue;
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003913
3914 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3915 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3916 continue;
3917
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003918 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3919 !phy_flashing_required(adapter))
3920 continue;
3921
3922 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303923 status = be_check_flash_crc(adapter, fw->data,
3924 pflashcomp[i].offset,
3925 pflashcomp[i].size,
3926 filehdr_size +
3927 img_hdrs_size,
3928 OPTYPE_REDBOOT, &crc_match);
3929 if (status) {
3930 dev_err(dev,
3931 "Could not get CRC for 0x%x region\n",
3932 pflashcomp[i].optype);
3933 continue;
3934 }
3935
3936 if (crc_match)
Sathya Perla306f1342011-08-02 19:57:45 +00003937 continue;
3938 }
Padmanabh Ratnakarc165541e2012-04-25 01:47:15 +00003939
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303940 p = fw->data + filehdr_size + pflashcomp[i].offset +
3941 img_hdrs_size;
Sathya Perla306f1342011-08-02 19:57:45 +00003942 if (p + pflashcomp[i].size > fw->data + fw->size)
3943 return -1;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003944
3945 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
Sathya Perla748b5392014-05-09 13:29:13 +05303946 pflashcomp[i].size);
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003947 if (status) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303948 dev_err(dev, "Flashing section type 0x%x failed\n",
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00003949 pflashcomp[i].img_type);
3950 return status;
Ajit Khaparde84517482009-09-04 03:12:16 +00003951 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003952 }
Ajit Khaparde84517482009-09-04 03:12:16 +00003953 return 0;
3954}
3955
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05303956static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3957{
3958 u32 img_type = le32_to_cpu(fsec_entry.type);
3959 u16 img_optype = le16_to_cpu(fsec_entry.optype);
3960
3961 if (img_optype != 0xFFFF)
3962 return img_optype;
3963
3964 switch (img_type) {
3965 case IMAGE_FIRMWARE_iSCSI:
3966 img_optype = OPTYPE_ISCSI_ACTIVE;
3967 break;
3968 case IMAGE_BOOT_CODE:
3969 img_optype = OPTYPE_REDBOOT;
3970 break;
3971 case IMAGE_OPTION_ROM_ISCSI:
3972 img_optype = OPTYPE_BIOS;
3973 break;
3974 case IMAGE_OPTION_ROM_PXE:
3975 img_optype = OPTYPE_PXE_BIOS;
3976 break;
3977 case IMAGE_OPTION_ROM_FCoE:
3978 img_optype = OPTYPE_FCOE_BIOS;
3979 break;
3980 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3981 img_optype = OPTYPE_ISCSI_BACKUP;
3982 break;
3983 case IMAGE_NCSI:
3984 img_optype = OPTYPE_NCSI_FW;
3985 break;
3986 case IMAGE_FLASHISM_JUMPVECTOR:
3987 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3988 break;
3989 case IMAGE_FIRMWARE_PHY:
3990 img_optype = OPTYPE_SH_PHY_FW;
3991 break;
3992 case IMAGE_REDBOOT_DIR:
3993 img_optype = OPTYPE_REDBOOT_DIR;
3994 break;
3995 case IMAGE_REDBOOT_CONFIG:
3996 img_optype = OPTYPE_REDBOOT_CONFIG;
3997 break;
3998 case IMAGE_UFI_DIR:
3999 img_optype = OPTYPE_UFI_DIR;
4000 break;
4001 default:
4002 break;
4003 }
4004
4005 return img_optype;
4006}
4007
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004008static int be_flash_skyhawk(struct be_adapter *adapter,
Sathya Perla748b5392014-05-09 13:29:13 +05304009 const struct firmware *fw,
4010 struct be_dma_mem *flash_cmd, int num_of_images)
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004011{
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004012 int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304013 struct device *dev = &adapter->pdev->dev;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004014 struct flash_section_info *fsec = NULL;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304015 u32 img_offset, img_size, img_type;
4016 int status, i, filehdr_size;
4017 bool crc_match, old_fw_img;
4018 u16 img_optype;
4019 const u8 *p;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004020
4021 filehdr_size = sizeof(struct flash_file_hdr_g3);
4022 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
4023 if (!fsec) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304024 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
Kalesh AP56ace3a2014-07-17 16:20:20 +05304025 return -EINVAL;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004026 }
4027
4028 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
4029 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
4030 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304031 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
4032 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
4033 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004034
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304035 if (img_optype == 0xFFFF)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004036 continue;
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304037 /* Don't bother verifying CRC if an old FW image is being
4038 * flashed
4039 */
4040 if (old_fw_img)
4041 goto flash;
4042
4043 status = be_check_flash_crc(adapter, fw->data, img_offset,
4044 img_size, filehdr_size +
4045 img_hdrs_size, img_optype,
4046 &crc_match);
4047 /* The current FW image on the card does not recognize the new
4048 * FLASH op_type. The FW download is partially complete.
4049 * Reboot the server now to enable FW image to recognize the
4050 * new FLASH op_type. To complete the remaining process,
4051 * download the same FW again after the reboot.
4052 */
Kalesh AP4c600052014-05-30 19:06:26 +05304053 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4054 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304055 dev_err(dev, "Flash incomplete. Reset the server\n");
4056 dev_err(dev, "Download FW image again after reset\n");
4057 return -EAGAIN;
4058 } else if (status) {
4059 dev_err(dev, "Could not get CRC for 0x%x region\n",
4060 img_optype);
4061 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004062 }
4063
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304064 if (crc_match)
4065 continue;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004066
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304067flash:
4068 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004069 if (p + img_size > fw->data + fw->size)
4070 return -1;
4071
4072 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304073 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4074 * UFI_DIR region
4075 */
Kalesh AP4c600052014-05-30 19:06:26 +05304076 if (old_fw_img &&
4077 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4078 (img_optype == OPTYPE_UFI_DIR &&
4079 base_status(status) == MCC_STATUS_FAILED))) {
Vasundhara Volam96c9b2e2014-05-30 19:06:25 +05304080 continue;
4081 } else if (status) {
4082 dev_err(dev, "Flashing section type 0x%x failed\n",
4083 img_type);
4084 return -EFAULT;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004085 }
4086 }
4087 return 0;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00004088}
4089
/* Download a firmware image to a Lancer adapter.
 *
 * The image is written to the "/prg" flash object in 32KB chunks via
 * lancer_cmd_write_object(), then committed with a final zero-length
 * write.  Depending on the change_status returned by the commit, the new
 * FW is activated by an immediate function-level reset, or the user is
 * told a server reboot is required.
 *
 * Returns 0 on success or a negative errno (-EINVAL for a misaligned
 * image, -ENOMEM on DMA allocation failure, or the translated FW error).
 */
static int lancer_fw_download(struct be_adapter *adapter,
			      const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
	struct device *dev = &adapter->pdev->dev;
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;
	u8 change_status;

	/* FW expects the image length in whole dwords */
	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(dev, "FW image size should be multiple of 4\n");
		return -EINVAL;
	}

	/* One DMA buffer holds the write_object request header followed by
	 * the current chunk of image data.
	 */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va)
		return -ENOMEM;

	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	/* Stream the image chunk-by-chunk; the FW reports how much it
	 * actually consumed in data_written.
	 */
	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 chunk_size, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
		if (status)
			break;

		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
						 0, offset,
						 LANCER_FW_DOWNLOAD_LOCATION,
						 &data_written, &change_status,
						 &add_status);
	}

	dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
	if (status) {
		dev_err(dev, "Firmware load error\n");
		return be_cmd_status(status);
	}

	dev_info(dev, "Firmware flashed successfully\n");

	/* Activate the new image: try an in-band FW reset when the FW says
	 * one is sufficient; otherwise ask for a server reboot.
	 */
	if (change_status == LANCER_FW_RESET_NEEDED) {
		dev_info(dev, "Resetting adapter to activate new FW\n");
		status = lancer_physdev_ctrl(adapter,
					     PHYSDEV_CONTROL_FW_RESET_MASK);
		if (status) {
			dev_err(dev, "Adapter busy, could not reset FW\n");
			dev_err(dev, "Reboot server to activate new FW\n");
		}
	} else if (change_status != LANCER_NO_RESET_NEEDED) {
		dev_info(dev, "Reboot server to activate new FW\n");
	}

	return 0;
}
4174
Sathya Perlaca34fe32012-11-06 17:48:56 +00004175#define UFI_TYPE2 2
4176#define UFI_TYPE3 3
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004177#define UFI_TYPE3R 10
Sathya Perlaca34fe32012-11-06 17:48:56 +00004178#define UFI_TYPE4 4
4179static int be_get_ufi_type(struct be_adapter *adapter,
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004180 struct flash_file_hdr_g3 *fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004181{
Kalesh APddf11692014-07-17 16:20:28 +05304182 if (!fhdr)
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004183 goto be_get_ufi_exit;
4184
Sathya Perlaca34fe32012-11-06 17:48:56 +00004185 if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4186 return UFI_TYPE4;
Vasundhara Volam0ad31572013-04-21 23:28:16 +00004187 else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4188 if (fhdr->asic_type_rev == 0x10)
4189 return UFI_TYPE3R;
4190 else
4191 return UFI_TYPE3;
4192 } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
Sathya Perlaca34fe32012-11-06 17:48:56 +00004193 return UFI_TYPE2;
Padmanabh Ratnakar773a2d72012-10-20 06:04:16 +00004194
4195be_get_ufi_exit:
4196 dev_err(&adapter->pdev->dev,
4197 "UFI and Interface are not compatible for flashing\n");
4198 return -1;
4199}
4200
/* Download a UFI image to a BE2/BE3/Skyhawk adapter.
 *
 * Determines the UFI flavour from the file header, verifies it is
 * compatible with this chip, then dispatches each generation-1 image
 * header to the matching per-generation flashing routine
 * (be_flash_skyhawk() or be_flash_BEx()).
 *
 * Returns 0 on success or a negative errno (-ENOMEM on DMA allocation
 * failure, -EINVAL for an incompatible UFI, or the flash routine's
 * error).
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0, ufi_type = 0;

	/* DMA buffer reused for every flashrom command issued below */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		goto be_fw_exit;
	}

	p = fw->data;
	fhdr3 = (struct flash_file_hdr_g3 *)p;

	/* ufi_type is -1 when the image does not match this chip */
	ufi_type = be_get_ufi_type(adapter, fhdr3);

	num_imgs = le32_to_cpu(fhdr3->num_imgs);
	for (i = 0; i < num_imgs; i++) {
		img_hdr_ptr = (struct image_hdr *)(fw->data +
				(sizeof(struct flash_file_hdr_g3) +
				 i * sizeof(struct image_hdr)));
		/* Only imageid 1 headers carry flashable content */
		if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
			switch (ufi_type) {
			case UFI_TYPE4:
				status = be_flash_skyhawk(adapter, fw,
							  &flash_cmd, num_imgs);
				break;
			case UFI_TYPE3R:
				status = be_flash_BEx(adapter, fw, &flash_cmd,
						      num_imgs);
				break;
			case UFI_TYPE3:
				/* Do not flash this ufi on BE3-R cards */
				if (adapter->asic_rev < 0x10)
					status = be_flash_BEx(adapter, fw,
							      &flash_cmd,
							      num_imgs);
				else {
					status = -EINVAL;
					dev_err(&adapter->pdev->dev,
						"Can't load BE3 UFI on BE3R\n");
				}
			}
		}
	}

	/* Gen-2 UFIs have no image headers; flash them directly */
	if (ufi_type == UFI_TYPE2)
		status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
	else if (ufi_type == -1)
		status = -EINVAL;

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
4269
4270int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4271{
4272 const struct firmware *fw;
4273 int status;
4274
4275 if (!netif_running(adapter->netdev)) {
4276 dev_err(&adapter->pdev->dev,
4277 "Firmware load not allowed (interface is down)\n");
Kalesh AP940a3fc2014-07-17 16:20:19 +05304278 return -ENETDOWN;
Shripad Nunjundarao485bf562011-05-16 07:36:59 +00004279 }
4280
4281 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4282 if (status)
4283 goto fw_exit;
4284
4285 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4286
4287 if (lancer_chip(adapter))
4288 status = lancer_fw_download(adapter, fw);
4289 else
4290 status = be_fw_download(adapter, fw);
4291
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004292 if (!status)
Kalesh APe97e3cd2014-07-17 16:20:26 +05304293 be_cmd_get_fw_ver(adapter);
Somnath Kotureeb65ce2013-05-26 21:08:36 +00004294
Ajit Khaparde84517482009-09-04 03:12:16 +00004295fw_exit:
4296 release_firmware(fw);
4297 return status;
4298}
4299
Sathya Perla748b5392014-05-09 13:29:13 +05304300static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004301{
4302 struct be_adapter *adapter = netdev_priv(dev);
4303 struct nlattr *attr, *br_spec;
4304 int rem;
4305 int status = 0;
4306 u16 mode = 0;
4307
4308 if (!sriov_enabled(adapter))
4309 return -EOPNOTSUPP;
4310
4311 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4312
4313 nla_for_each_nested(attr, br_spec, rem) {
4314 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4315 continue;
4316
4317 mode = nla_get_u16(attr);
4318 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4319 return -EINVAL;
4320
4321 status = be_cmd_set_hsw_config(adapter, 0, 0,
4322 adapter->if_handle,
4323 mode == BRIDGE_MODE_VEPA ?
4324 PORT_FWD_TYPE_VEPA :
4325 PORT_FWD_TYPE_VEB);
4326 if (status)
4327 goto err;
4328
4329 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4330 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4331
4332 return status;
4333 }
4334err:
4335 dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4336 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4337
4338 return status;
4339}
4340
4341static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
Sathya Perla748b5392014-05-09 13:29:13 +05304342 struct net_device *dev, u32 filter_mask)
Ajit Khapardea77dcb82013-08-30 15:01:16 -05004343{
4344 struct be_adapter *adapter = netdev_priv(dev);
4345 int status = 0;
4346 u8 hsw_mode;
4347
4348 if (!sriov_enabled(adapter))
4349 return 0;
4350
4351 /* BE and Lancer chips support VEB mode only */
4352 if (BEx_chip(adapter) || lancer_chip(adapter)) {
4353 hsw_mode = PORT_FWD_TYPE_VEB;
4354 } else {
4355 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4356 adapter->if_handle, &hsw_mode);
4357 if (status)
4358 return 0;
4359 }
4360
4361 return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4362 hsw_mode == PORT_FWD_TYPE_VEPA ?
4363 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4364}
4365
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304366#ifdef CONFIG_BE2NET_VXLAN
Sathya Perlac9c47142014-03-27 10:46:19 +05304367static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4368 __be16 port)
4369{
4370 struct be_adapter *adapter = netdev_priv(netdev);
4371 struct device *dev = &adapter->pdev->dev;
4372 int status;
4373
4374 if (lancer_chip(adapter) || BEx_chip(adapter))
4375 return;
4376
4377 if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4378 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4379 be16_to_cpu(port));
4380 dev_info(dev,
4381 "Only one UDP port supported for VxLAN offloads\n");
4382 return;
4383 }
4384
4385 status = be_cmd_manage_iface(adapter, adapter->if_handle,
4386 OP_CONVERT_NORMAL_TO_TUNNEL);
4387 if (status) {
4388 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4389 goto err;
4390 }
4391
4392 status = be_cmd_set_vxlan_port(adapter, port);
4393 if (status) {
4394 dev_warn(dev, "Failed to add VxLAN port\n");
4395 goto err;
4396 }
4397 adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4398 adapter->vxlan_port = port;
4399
4400 dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4401 be16_to_cpu(port));
4402 return;
4403err:
4404 be_disable_vxlan_offloads(adapter);
Sathya Perlac9c47142014-03-27 10:46:19 +05304405}
4406
4407static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4408 __be16 port)
4409{
4410 struct be_adapter *adapter = netdev_priv(netdev);
4411
4412 if (lancer_chip(adapter) || BEx_chip(adapter))
4413 return;
4414
4415 if (adapter->vxlan_port != port)
4416 return;
4417
4418 be_disable_vxlan_offloads(adapter);
4419
4420 dev_info(&adapter->pdev->dev,
4421 "Disabled VxLAN offloads for UDP port %d\n",
4422 be16_to_cpu(port));
4423}
Sathya Perlac5abe7c2014-04-01 12:33:59 +05304424#endif
Sathya Perlac9c47142014-03-27 10:46:19 +05304425
/* Netdev entry points for the be2net driver.
 * The ndo_set_vf_* / ndo_get_vf_config hooks operate on SR-IOV VFs;
 * the bridge set/getlink hooks manage the e-switch VEB/VEPA mode;
 * the VxLAN port hooks are compiled in only with CONFIG_BE2NET_VXLAN.
 */
static const struct net_device_ops be_netdev_ops = {
	.ndo_open = be_open,
	.ndo_stop = be_close,
	.ndo_start_xmit = be_xmit,
	.ndo_set_rx_mode = be_set_rx_mode,
	.ndo_set_mac_address = be_mac_addr_set,
	.ndo_change_mtu = be_change_mtu,
	.ndo_get_stats64 = be_get_stats64,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_vlan_rx_add_vid = be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
	.ndo_set_vf_mac = be_set_vf_mac,
	.ndo_set_vf_vlan = be_set_vf_vlan,
	.ndo_set_vf_rate = be_set_vf_tx_rate,
	.ndo_get_vf_config = be_get_vf_config,
	.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = be_netpoll,
#endif
	.ndo_bridge_setlink = be_ndo_bridge_setlink,
	.ndo_bridge_getlink = be_ndo_bridge_getlink,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll = be_busy_poll,
#endif
#ifdef CONFIG_BE2NET_VXLAN
	.ndo_add_vxlan_port = be_add_vxlan_port,
	.ndo_del_vxlan_port = be_del_vxlan_port,
#endif
};
4455
/* Initialize netdev feature flags and attach the driver's ops tables.
 * Note: statement order matters here — 'features' is derived from
 * 'hw_features' after all hw_features bits have been accumulated.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	/* Skyhawk supports checksum/TSO offload of encapsulated (VxLAN)
	 * traffic, advertised via hw_enc_features.
	 */
	if (skyhawk_chip(adapter)) {
		netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
					   NETIF_F_TSO | NETIF_F_TSO6 |
					   NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_CTAG_TX;
	/* RX hashing is useful only with multiple RX queues */
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* Enable everything togglable by default, plus VLAN RX offloads
	 * that are always on.
	 */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->flags |= IFF_MULTICAST;

	/* Keep each GSO segment within a max-size frame incl. the L2 header */
	netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);

	netdev->netdev_ops = &be_netdev_ops;

	netdev->ethtool_ops = &be_ethtool_ops;
}
4488
4489static void be_unmap_pci_bars(struct be_adapter *adapter)
4490{
Sathya Perlac5b3ad42013-03-05 22:23:20 +00004491 if (adapter->csr)
4492 pci_iounmap(adapter->pdev, adapter->csr);
Sathya Perla8788fdc2009-07-27 22:52:03 +00004493 if (adapter->db)
Sathya Perlace66f782012-11-06 17:48:58 +00004494 pci_iounmap(adapter->pdev, adapter->db);
Parav Pandit045508a2012-03-26 14:27:13 +00004495}
4496
/* Return the PCI BAR number holding the doorbell registers:
 * BAR 0 on Lancer and on VFs, BAR 4 on BE/Skyhawk PFs.
 */
static int db_bar(struct be_adapter *adapter)
{
	return (lancer_chip(adapter) || !be_physfn(adapter)) ? 0 : 4;
}
4504
4505static int be_roce_map_pci_bars(struct be_adapter *adapter)
Parav Pandit045508a2012-03-26 14:27:13 +00004506{
Sathya Perladbf0f2a2012-11-06 17:49:00 +00004507 if (skyhawk_chip(adapter)) {
Sathya Perlace66f782012-11-06 17:48:58 +00004508 adapter->roce_db.size = 4096;
4509 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4510 db_bar(adapter));
4511 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4512 db_bar(adapter));
4513 }
Parav Pandit045508a2012-03-26 14:27:13 +00004514 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004515}
4516
/* Map the PCI BARs used by the driver: the CSR BAR (BAR 2, present only
 * on BE2/BE3 PFs), the doorbell BAR (number chosen by db_bar()) and, on
 * Skyhawk, the RoCE doorbell window.
 * Returns 0 on success or -ENOMEM when an iomap fails (unmapping
 * whatever was already mapped).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	if (BEx_chip(adapter) && be_physfn(adapter)) {
		adapter->csr = pci_iomap(adapter->pdev, 2, 0);
		if (!adapter->csr)
			return -ENOMEM;
	}

	addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
	if (!addr)
		goto pci_map_err;
	adapter->db = addr;

	/* No-op on non-Skyhawk chips */
	be_roce_map_pci_bars(adapter);
	return 0;

pci_map_err:
	dev_err(&adapter->pdev->dev, "Error in mapping PCI BARs\n");
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
4540
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004541static void be_ctrl_cleanup(struct be_adapter *adapter)
4542{
Sathya Perla8788fdc2009-07-27 22:52:03 +00004543 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004544
4545 be_unmap_pci_bars(adapter);
4546
4547 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004548 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4549 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00004550
Sathya Perla5b8821b2011-08-02 19:57:44 +00004551 mem = &adapter->rx_filter;
Sathya Perlae7b909a2009-11-22 22:01:10 +00004552 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004553 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4554 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004555}
4556
/* One-time control-path initialization.
 *
 * Reads the SLI interface register to record the SLI family and whether
 * this function is a VF, maps the PCI BARs, and allocates the mailbox
 * and RX-filter DMA buffers.  The mailbox is allocated 16 bytes oversize
 * so that the working mbox_mem view can be aligned to a 16-byte
 * boundary.  On failure, resources acquired so far are released via the
 * goto cleanup chain.
 *
 * Returns 0 on success or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
				 SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the aligned view below fits */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	/* 16-byte-aligned view of the same buffer, used for mailbox cmds */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (!rx_filter->va) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	/* Snapshot config space for restore after EEH/reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
4615
4616static void be_stats_cleanup(struct be_adapter *adapter)
4617{
Sathya Perla3abcded2010-10-03 22:12:27 -07004618 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004619
4620 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00004621 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4622 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004623}
4624
4625static int be_stats_init(struct be_adapter *adapter)
4626{
Sathya Perla3abcded2010-10-03 22:12:27 -07004627 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004628
Sathya Perlaca34fe32012-11-06 17:48:56 +00004629 if (lancer_chip(adapter))
4630 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4631 else if (BE2_chip(adapter))
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00004632 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Ajit Khaparde61000862013-10-03 16:16:33 -05004633 else if (BE3_chip(adapter))
Sathya Perlaca34fe32012-11-06 17:48:56 +00004634 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
Ajit Khaparde61000862013-10-03 16:16:33 -05004635 else
4636 /* ALL non-BE ASICs */
4637 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
Sathya Perlaca34fe32012-11-06 17:48:56 +00004638
Joe Perchesede23fa2013-08-26 22:45:23 -07004639 cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4640 GFP_KERNEL);
Kalesh APddf11692014-07-17 16:20:28 +05304641 if (!cmd->va)
Kalesh AP6b568682014-07-17 16:20:22 +05304642 return -ENOMEM;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004643 return 0;
4644}
4645
/* PCI remove callback: tears the adapter down in roughly the reverse
 * order of be_probe() — quiesce RoCE/interrupts/workers, unregister the
 * netdev, then release firmware, stats, control and PCI resources.
 */
static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to do if probe never completed for this function */
	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	/* Mask interrupts for any other ULPs still using the device */
	be_intr_set(adapter, false);

	/* Stop the error-recovery worker before the netdev goes away */
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}
4676
Sathya Perla39f1d942012-05-08 19:41:24 +00004677static int be_get_initial_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004678{
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304679 int status, level;
Sathya Perla43a04fdc2009-10-14 20:21:17 +00004680
Ajit Khaparde9e1453c2011-02-20 11:42:22 +00004681 status = be_cmd_get_cntl_attributes(adapter);
4682 if (status)
4683 return status;
4684
Padmanabh Ratnakar7aeb2152012-07-12 03:56:46 +00004685 /* Must be a power of 2 or else MODULO will BUG_ON */
4686 adapter->be_get_temp_freq = 64;
4687
Vasundhara Volambaaa08d2014-01-15 13:23:34 +05304688 if (BEx_chip(adapter)) {
4689 level = be_cmd_get_fw_log_level(adapter);
4690 adapter->msg_enable =
4691 level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4692 }
Somnath Kotur941a77d2012-05-17 22:59:03 +00004693
Sathya Perla92bf14a2013-08-27 16:57:32 +05304694 adapter->cfg_num_qs = netif_get_num_default_rss_queues();
Sathya Perla2243e2e2009-11-22 22:02:03 +00004695 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07004696}
4697
/* Recover a Lancer chip after a firmware error: wait for the chip to
 * become ready again, tear the adapter down, clear error state and
 * rebuild it (re-opening the interface if it was running).
 *
 * Returns 0 on success; -EAGAIN while firmware resources are still
 * being provisioned (caller retries); another negative errno otherwise.
 */
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	/* dev_err (not dev_info) so the message is visible at default
	 * log levels alongside the earlier error reports
	 */
	dev_err(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}
4734
/* Delayed-work handler (rescheduled every 1s) that polls for hardware
 * errors and, on Lancer chips, runs the recovery sequence when one is
 * detected. Stops rescheduling itself once recovery fails with an error
 * other than -EAGAIN (resource provisioning still in progress).
 */
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {

		/* Detach under rtnl so the stack stops using the netdev
		 * while the adapter is being rebuilt
		 */
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* In Lancer, for all errors other than provisioning error (-EAGAIN),
	 * no need to attempt further recovery.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}
4761
/* Periodic (1s) housekeeping worker: reaps MCC completions while the
 * interface is down, issues stats and die-temperature queries,
 * replenishes RX queues that starved on allocation failures and updates
 * the adaptive EQ delay.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	/* Only fire a new stats request once the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	/* Temperature query: PF only, every be_get_temp_freq ticks */
	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
4804
Sathya Perla257a3fe2013-06-14 15:54:51 +05304805/* If any VFs are already enabled don't FLR the PF */
Sathya Perla39f1d942012-05-08 19:41:24 +00004806static bool be_reset_required(struct be_adapter *adapter)
4807{
Sathya Perla257a3fe2013-06-14 15:54:51 +05304808 return pci_num_vf(adapter->pdev) ? false : true;
Sathya Perla39f1d942012-05-08 19:41:24 +00004809}
4810
Sathya Perlad3791422012-09-28 04:39:44 +00004811static char *mc_name(struct be_adapter *adapter)
4812{
Vasundhara Volamf93f1602014-02-12 16:09:25 +05304813 char *str = ""; /* default */
4814
4815 switch (adapter->mc_type) {
4816 case UMC:
4817 str = "UMC";
4818 break;
4819 case FLEX10:
4820 str = "FLEX10";
4821 break;
4822 case vNIC1:
4823 str = "vNIC-1";
4824 break;
4825 case nPAR:
4826 str = "nPAR";
4827 break;
4828 case UFP:
4829 str = "UFP";
4830 break;
4831 case vNIC2:
4832 str = "vNIC-2";
4833 break;
4834 default:
4835 str = "";
4836 }
4837
4838 return str;
Sathya Perlad3791422012-09-28 04:39:44 +00004839}
4840
/* "PF" for the physical function, "VF" for a virtual function */
static inline char *func_name(struct be_adapter *adapter)
{
	if (be_physfn(adapter))
		return "PF";

	return "VF";
}
4845
/* PCI probe callback: enables the device, allocates the netdev, sets up
 * DMA masks and AER, initializes control structures, synchronizes with
 * firmware, configures the adapter and registers the net device.
 * Unwinds via the goto ladder on failure.
 */
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	dev_info(&pdev->dev, "%s version is %s\n", DRV_NAME, DRV_VER);

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (!netdev) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	/* Prefer a 64-bit DMA mask; fall back to 32-bit if unsupported */
	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	/* AER is best-effort: failure to enable it is not fatal */
	status = pci_enable_pcie_error_reporting(pdev);
	if (!status)
		dev_info(&pdev->dev, "PCIe error reporting enabled\n");

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	/* Kick off the periodic error-detection/recovery poll */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}
4967
/* Legacy PM suspend callback: arm wake-on-LAN if enabled, quiesce
 * interrupts and the recovery worker, close the interface and tear down
 * adapter resources, then put the device into the requested power state.
 * Always returns 0.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
4992
4993static int be_resume(struct pci_dev *pdev)
4994{
4995 int status = 0;
4996 struct be_adapter *adapter = pci_get_drvdata(pdev);
4997 struct net_device *netdev = adapter->netdev;
4998
4999 netif_device_detach(netdev);
5000
5001 status = pci_enable_device(pdev);
5002 if (status)
5003 return status;
5004
Yijing Wang1ca01512013-06-27 20:53:42 +08005005 pci_set_power_state(pdev, PCI_D0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005006 pci_restore_state(pdev);
5007
Sarveshwar Bandidd5746b2013-08-23 14:59:33 +05305008 status = be_fw_wait_ready(adapter);
5009 if (status)
5010 return status;
5011
Ajit Khaparded4360d62013-11-22 12:51:09 -06005012 be_intr_set(adapter, true);
Sathya Perla2243e2e2009-11-22 22:02:03 +00005013 /* tell fw we're ready to fire cmds */
5014 status = be_cmd_fw_init(adapter);
5015 if (status)
5016 return status;
5017
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00005018 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005019 if (netif_running(netdev)) {
5020 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005021 be_open(netdev);
5022 rtnl_unlock();
5023 }
Padmanabh Ratnakarf67ef7b2012-07-12 03:57:09 +00005024
5025 schedule_delayed_work(&adapter->func_recovery_work,
5026 msecs_to_jiffies(1000));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005027 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005028
Suresh Reddy76a9e082014-01-15 13:23:40 +05305029 if (adapter->wol_en)
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00005030 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00005031
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005032 return 0;
5033}
5034
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to do if probe never completed for this function */
	if (!adapter)
		return;

	be_roce_dev_shutdown(adapter);
	/* Stop background workers before resetting the function */
	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	/* Function-level reset quiesces all DMA before power-off/kexec */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
5055
/* AER/EEH error_detected callback: on the first report, quiesce the
 * interface and tear down adapter state; then tell the PCI core whether
 * the slot should be reset or the device disconnected.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* Tear down only once even if multiple errors are reported */
	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}
5094
/* AER/EEH slot_reset callback: re-enable the device after the slot has
 * been reset, restore PCI state, and wait for firmware readiness before
 * declaring the device recovered.
 */
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	/* Clear any stale uncorrectable-error status before resuming */
	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}
5121
/* AER/EEH resume callback: rebuild the adapter after a successful slot
 * reset (function reset, interrupt re-enable, firmware init, setup) and
 * re-attach the netdev. Failures are logged; no further recovery is
 * attempted here.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* On some BE3 FW versions, after a HW reset,
	 * interrupts will remain disabled for each function.
	 * So, explicitly enable interrupts
	 */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	/* Restart the periodic error-detection poll */
	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
5164
/* PCI error (AER/EEH) recovery callbacks */
static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
5170
/* PCI driver descriptor: probe/remove, legacy PM hooks, shutdown and
 * AER/EEH error handlers.
 */
static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};
5181
5182static int __init be_init_module(void)
5183{
Joe Perches8e95a202009-12-03 07:58:21 +00005184 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5185 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005186 printk(KERN_WARNING DRV_NAME
5187 " : Module param rx_frag_size must be 2048/4096/8192."
5188 " Using 2048\n");
5189 rx_frag_size = 2048;
5190 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005191
Sathya Perla6b7c5b92009-03-11 23:32:03 -07005192 return pci_register_driver(&be_driver);
5193}
5194module_init(be_init_module);
5195
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);